qubes-linux-kernel/patches.suse/kdb-x86


From: Martin Hicks <mort@sgi.com>
Date: Mon, 07 Dec 2009 11:52:50 -0600
Subject: kdb-v4.4-2.6.32-x86-3
References: FATE#303971
X-URL: ftp://oss.sgi.com/www/projects/kdb/download/v4.4/
Patch-mainline: Not yet

The KDB x86 code.
Acked-by: Jeff Mahoney <jeffm@suse.com>
---
 arch/x86/Kconfig.debug              |   87
 arch/x86/Makefile                   |    3
 arch/x86/include/asm/ansidecl.h     |    5
 arch/x86/include/asm/ansidecl_32.h  |  383 ++
 arch/x86/include/asm/ansidecl_64.h  |  383 ++
 arch/x86/include/asm/bfd.h          |    5
 arch/x86/include/asm/bfd_32.h       | 4921 +++++++++++++++++++++++++++++++
 arch/x86/include/asm/bfd_64.h       | 4917 +++++++++++++++++++++++++++++++
 arch/x86/include/asm/irq_vectors.h  |    7
 arch/x86/include/asm/kdb.h          |  140
 arch/x86/include/asm/kdbprivate.h   |  241 +
 arch/x86/include/asm/kdebug.h       |    2
 arch/x86/include/asm/ptrace.h       |   23
 arch/x86/kdb/ChangeLog              |  262 +
 arch/x86/kdb/ChangeLog_32           |  865 +++++
 arch/x86/kdb/ChangeLog_64           |  447 ++
 arch/x86/kdb/Makefile               |   29
 arch/x86/kdb/kdb_cmds_32            |   17
 arch/x86/kdb/kdb_cmds_64            |   18
 arch/x86/kdb/kdba_bp.c              |  914 +++++
 arch/x86/kdb/kdba_bt.c              | 5757 +++++++++++++++++++++++++++++++++++
 arch/x86/kdb/kdba_id.c              |  261 +
 arch/x86/kdb/kdba_io.c              |  666 ++++
 arch/x86/kdb/kdba_support.c         | 1536 +++++++++
 arch/x86/kdb/pc_keyb.h              |  137
 arch/x86/kdb/x86-dis.c              | 4688 ++++++++++++++++++++++++++++++
 arch/x86/kernel/apic/io_apic.c      |    8
 arch/x86/kernel/dumpstack.c         |   12
 arch/x86/kernel/entry_32.S          |   20
 arch/x86/kernel/entry_64.S          |   27
 arch/x86/kernel/reboot.c            |   35
 arch/x86/kernel/traps.c             |   27
 32 files changed, 26843 insertions(+)
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -310,4 +310,91 @@ config DEBUG_STRICT_USER_COPY_CHECKS
 	  If unsure, or if you run an older (pre 4.4) gcc, say N.
 
+config KDB
+	bool "Built-in Kernel Debugger support"
+	depends on DEBUG_KERNEL
+	select KALLSYMS
+	select KALLSYMS_ALL
+	help
+	  This option provides a built-in kernel debugger. The built-in
+	  kernel debugger contains commands which allow memory to be examined,
+	  instructions to be disassembled and breakpoints to be set. For details,
+	  see Documentation/kdb/kdb.mm and the manual pages kdb_bt, kdb_ss, etc.
+	  Kdb can also be used via the serial port. Set up the system to
+	  have a serial console (see Documentation/serial-console.txt).
+	  The key sequence <escape>KDB on the serial port will cause the
+	  kernel debugger to be entered with input from the serial port and
+	  output to the serial console. If unsure, say N.
+
+config KDB_MODULES
+	tristate "KDB modules"
+	depends on KDB
+	help
+	  KDB can be extended by adding your own modules, in directory
+	  kdb/modules. This option selects the way that these modules should
+	  be compiled, as free-standing modules (select M) or built into the
+	  kernel (select Y). If unsure, say M.
+
+config KDB_OFF
+	bool "KDB off by default"
+	depends on KDB
+	help
+	  Normally kdb is activated by default, as long as CONFIG_KDB is set.
+	  If you want to ship a kernel with kdb support but only have kdb
+	  turned on when the user requests it, select this option. When
+	  compiled with CONFIG_KDB_OFF, kdb ignores all events unless you boot
+	  with kdb=on or you echo "1" > /proc/sys/kernel/kdb. This option also
+	  works in reverse: if kdb is normally activated, you can boot with
+	  kdb=off or echo "0" > /proc/sys/kernel/kdb to deactivate kdb. If
+	  unsure, say N.
+
+config KDB_CONTINUE_CATASTROPHIC
+	int "KDB continues after catastrophic errors"
+	depends on KDB
+	default "0"
+	help
+	  This integer controls the behaviour of kdb when the kernel gets a
+	  catastrophic error, i.e. a panic, oops, NMI or other watchdog
+	  tripping. CONFIG_KDB_CONTINUE_CATASTROPHIC interacts with
+	  /proc/sys/kernel/kdb and CONFIG_LKCD_DUMP (if your kernel has the
+	  LKCD patch).
+	  When KDB is active (/proc/sys/kernel/kdb == 1) and a catastrophic
+	  error occurs, nothing extra happens until you type 'go'.
+	  CONFIG_KDB_CONTINUE_CATASTROPHIC == 0 (default): the first time
+	  you type 'go', kdb warns you; the second time you type 'go', KDB
+	  tries to continue - no guarantees that the kernel is still usable.
+	  CONFIG_KDB_CONTINUE_CATASTROPHIC == 1: KDB tries to continue - no
+	  guarantees that the kernel is still usable.
+	  CONFIG_KDB_CONTINUE_CATASTROPHIC == 2: if your kernel has the LKCD
+	  patch and LKCD is configured to take a dump, KDB forces a dump.
+	  Whether or not a dump is taken, KDB forces a reboot.
+	  When KDB is not active (/proc/sys/kernel/kdb == 0) and a catastrophic
+	  error occurs, the following steps are automatic; no human
+	  intervention is required.
+	  CONFIG_KDB_CONTINUE_CATASTROPHIC == 0 (default) or 1: KDB attempts
+	  to continue - no guarantees that the kernel is still usable.
+	  CONFIG_KDB_CONTINUE_CATASTROPHIC == 2: if your kernel has the LKCD
+	  patch and LKCD is configured to take a dump, KDB automatically
+	  forces a dump. Whether or not a dump is taken, KDB forces a
+	  reboot.
+	  If you are not sure, say 0. Read Documentation/kdb/dump.txt before
+	  setting this to 2.
+
+config KDB_USB
+	bool "Support for USB Keyboard in KDB"
+	depends on KDB && (USB_OHCI_HCD || USB_EHCI_HCD || USB_UHCI_HCD)
+	help
+	  If you want to use kdb from USB keyboards, say Y here. If you
+	  say N, kdb can only be used from a PC (AT) keyboard or a serial
+	  console.
+
+config KDB_KDUMP
+	bool "Support for Kdump in KDB"
+	depends on KDB
+	select KEXEC
+	default n
+	help
+	  If you want to take a Kdump kernel vmcore from KDB, say Y here.
+	  If unsure, say N.
+
 endmenu
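
Illustrative sketch (not part of this patch): the KDB_OFF help above names
two switches, the kdb=on/kdb=off boot parameter and the /proc/sys/kernel/kdb
sysctl. A toggle of that shape is conventionally wired up as below; the names
kdb_on, kdb_sysctl_table and kdb_param are hypothetical stand-ins, not
symbols from this patch.

/* Hypothetical sketch; not patch code. */
#include <linux/init.h>
#include <linux/string.h>
#include <linux/sysctl.h>

static int kdb_on = 1;		/* would default to 0 under CONFIG_KDB_OFF */

/* Backs /proc/sys/kernel/kdb: writing 0 or 1 flips the flag at runtime. */
static struct ctl_table kdb_sysctl_table[] = {
	{
		.procname	= "kdb",
		.data		= &kdb_on,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

/* Handles the kdb=on / kdb=off boot parameter. */
static int __init kdb_param(char *str)
{
	if (strcmp(str, "on") == 0)
		kdb_on = 1;
	else if (strcmp(str, "off") == 0)
		kdb_on = 0;
	return 1;
}
__setup("kdb=", kdb_param);

The table would still need registering under the "kernel" sysctl directory
(register_sysctl_table() in this kernel generation); the patch's real switch
presumably lives in the arch-independent kdb core, which this x86 part does
not carry.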
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -137,6 +137,9 @@ drivers-$(CONFIG_PM) += arch/x86/power/
drivers-$(CONFIG_FB) += arch/x86/video/
+# KDB support
+drivers-$(CONFIG_KDB) += arch/x86/kdb/
+
####
# boot loader support. Several targets are kept for legacy purposes
--- /dev/null
+++ b/arch/x86/include/asm/ansidecl.h
@@ -0,0 +1,5 @@
+#ifdef CONFIG_X86_32
+# include "ansidecl_32.h"
+#else
+# include "ansidecl_64.h"
+#endif
--- /dev/null
+++ b/arch/x86/include/asm/ansidecl_32.h
@@ -0,0 +1,383 @@
+/* ANSI and traditional C compatibility macros
+ Copyright 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use.
+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as
+ * required.
+ * Keith Owens <kaos@sgi.com> 15 May 2006
+ */
+
+/* ANSI and traditional C compatibility macros
+
+ ANSI C is assumed if __STDC__ is #defined.
+
+ Macro ANSI C definition Traditional C definition
+ ----- ---- - ---------- ----------- - ----------
+ ANSI_PROTOTYPES 1 not defined
+ PTR `void *' `char *'
+ PTRCONST `void *const' `char *'
+ LONG_DOUBLE `long double' `double'
+ const not defined `'
+ volatile not defined `'
+ signed not defined `'
+ VA_START(ap, var) va_start(ap, var) va_start(ap)
+
+ Note that it is safe to write "void foo();" indicating a function
+ with no return value, in all K+R compilers we have been able to test.
+
+ For declaring functions with prototypes, we also provide these:
+
+ PARAMS ((prototype))
+ -- for functions which take a fixed number of arguments. Use this
+ when declaring the function. When defining the function, write a
+ K+R style argument list. For example:
+
+ char *strcpy PARAMS ((char *dest, char *source));
+ ...
+ char *
+ strcpy (dest, source)
+ char *dest;
+ char *source;
+ { ... }
+
+
+ VPARAMS ((prototype, ...))
+ -- for functions which take a variable number of arguments. Use
+ PARAMS to declare the function, VPARAMS to define it. For example:
+
+ int printf PARAMS ((const char *format, ...));
+ ...
+ int
+ printf VPARAMS ((const char *format, ...))
+ {
+ ...
+ }
+
+ For writing functions which take variable numbers of arguments, we
+ also provide the VA_OPEN, VA_CLOSE, and VA_FIXEDARG macros. These
+ hide the differences between K+R <varargs.h> and C89 <stdarg.h> more
+ thoroughly than the simple VA_START() macro mentioned above.
+
+ VA_OPEN and VA_CLOSE are used *instead of* va_start and va_end.
+ Immediately after VA_OPEN, put a sequence of VA_FIXEDARG calls
+ corresponding to the list of fixed arguments. Then use va_arg
+ normally to get the variable arguments, or pass your va_list object
+ around. You do not declare the va_list yourself; VA_OPEN does it
+ for you.
+
+ Here is a complete example:
+
+ int
+ printf VPARAMS ((const char *format, ...))
+ {
+ int result;
+
+ VA_OPEN (ap, format);
+ VA_FIXEDARG (ap, const char *, format);
+
+ result = vfprintf (stdout, format, ap);
+ VA_CLOSE (ap);
+
+ return result;
+ }
+
+
+ You can declare variables either before or after the VA_OPEN,
+ VA_FIXEDARG sequence. Also, VA_OPEN and VA_CLOSE are the beginning
+ and end of a block. They must appear at the same nesting level,
+ and any variables declared after VA_OPEN go out of scope at
+ VA_CLOSE. Unfortunately, with a K+R compiler, that includes the
+ argument list. You can have multiple instances of VA_OPEN/VA_CLOSE
+ pairs in a single function in case you need to traverse the
+ argument list more than once.
+
+ For ease of writing code which uses GCC extensions but needs to be
+ portable to other compilers, we provide the GCC_VERSION macro that
+ simplifies testing __GNUC__ and __GNUC_MINOR__ together, and various
+ wrappers around __attribute__. Also, __extension__ will be #defined
+ to nothing if it doesn't work. See below.
+
+ This header also defines a lot of obsolete macros:
+ CONST, VOLATILE, SIGNED, PROTO, EXFUN, DEFUN, DEFUN_VOID,
+ AND, DOTS, NOARGS. Don't use them. */
+
+#ifndef _ANSIDECL_H
+#define _ANSIDECL_H 1
+
+/* Every source file includes this file,
+ so they will all get the switch for lint. */
+/* LINTLIBRARY */
+
+/* Using MACRO(x,y) in cpp #if conditionals does not work with some
+ older preprocessors. Thus we can't define something like this:
+
+#define HAVE_GCC_VERSION(MAJOR, MINOR) \
+ (__GNUC__ > (MAJOR) || (__GNUC__ == (MAJOR) && __GNUC_MINOR__ >= (MINOR)))
+
+and then test "#if HAVE_GCC_VERSION(2,7)".
+
+So instead we use the macro below and test it against specific values. */
+
+/* This macro simplifies testing whether we are using gcc, and if it
+ is of a particular minimum version. (Both major & minor numbers are
+ significant.) This macro will evaluate to 0 if we are not using
+ gcc at all. */
+#ifndef GCC_VERSION
+#define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__)
+#endif /* GCC_VERSION */
+
+#if defined (__STDC__) || defined (_AIX) || (defined (__mips) && defined (_SYSTYPE_SVR4)) || defined(_WIN32) || (defined(__alpha) && defined(__cplusplus))
+/* All known AIX compilers implement these things (but don't always
+ define __STDC__). The RISC/OS MIPS compiler defines these things
+ in SVR4 mode, but does not define __STDC__. */
+/* eraxxon@alumni.rice.edu: The Compaq C++ compiler, unlike many other
+ C++ compilers, does not define __STDC__, though it acts as if this
+ was so. (Verified versions: 5.7, 6.2, 6.3, 6.5) */
+
+#define ANSI_PROTOTYPES 1
+#define PTR void *
+#define PTRCONST void *const
+#define LONG_DOUBLE long double
+
+/* PARAMS is often defined elsewhere (e.g. by libintl.h), so wrap it in
+ a #ifndef. */
+//#ifndef PARAMS
+//#define PARAMS(ARGS) ARGS
+//#endif
+
+#define VPARAMS(ARGS) ARGS
+#define VA_START(VA_LIST, VAR) va_start(VA_LIST, VAR)
+
+/* variadic function helper macros */
+/* "struct Qdmy" swallows the semicolon after VA_OPEN/VA_FIXEDARG's
+ use without inhibiting further decls and without declaring an
+ actual variable. */
+#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP, VAR); { struct Qdmy
+#define VA_CLOSE(AP) } va_end(AP); }
+#define VA_FIXEDARG(AP, T, N) struct Qdmy
+
+#undef const
+#undef volatile
+#undef signed
+
+#ifdef __KERNEL__
+#ifndef __STDC_VERSION__
+#define __STDC_VERSION__ 0
+#endif
+#endif /* __KERNEL__ */
+
+/* inline requires special treatment; it's in C99, and GCC >=2.7 supports
+ it too, but it's not in C89. */
+#undef inline
+#if __STDC_VERSION__ > 199901L
+/* it's a keyword */
+#else
+# if GCC_VERSION >= 2007
+# define inline __inline__ /* __inline__ prevents -pedantic warnings */
+# else
+# define inline /* nothing */
+# endif
+#endif
+
+/* These are obsolete. Do not use. */
+#ifndef IN_GCC
+#define CONST const
+#define VOLATILE volatile
+#define SIGNED signed
+
+#define PROTO(type, name, arglist) type name arglist
+#define EXFUN(name, proto) name proto
+#define DEFUN(name, arglist, args) name(args)
+#define DEFUN_VOID(name) name(void)
+#define AND ,
+#define DOTS , ...
+#define NOARGS void
+#endif /* ! IN_GCC */
+
+#else /* Not ANSI C. */
+
+#undef ANSI_PROTOTYPES
+#define PTR char *
+#define PTRCONST PTR
+#define LONG_DOUBLE double
+
+//#define PARAMS(args) ()
+#define VPARAMS(args) (va_alist) va_dcl
+#define VA_START(va_list, var) va_start(va_list)
+
+#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP); { struct Qdmy
+#define VA_CLOSE(AP) } va_end(AP); }
+#define VA_FIXEDARG(AP, TYPE, NAME) TYPE NAME = va_arg(AP, TYPE)
+
+/* some systems define these in header files for non-ansi mode */
+#undef const
+#undef volatile
+#undef signed
+#undef inline
+#define const
+#define volatile
+#define signed
+#define inline
+
+#ifndef IN_GCC
+#define CONST
+#define VOLATILE
+#define SIGNED
+
+#define PROTO(type, name, arglist) type name ()
+#define EXFUN(name, proto) name()
+#define DEFUN(name, arglist, args) name arglist args;
+#define DEFUN_VOID(name) name()
+#define AND ;
+#define DOTS
+#define NOARGS
+#endif /* ! IN_GCC */
+
+#endif /* ANSI C. */
+
+/* Define macros for some gcc attributes. This permits us to use the
+ macros freely, and know that they will come into play for the
+ version of gcc in which they are supported. */
+
+#if (GCC_VERSION < 2007)
+# define __attribute__(x)
+#endif
+
+/* Attribute __malloc__ on functions was valid as of gcc 2.96. */
+#ifndef ATTRIBUTE_MALLOC
+# if (GCC_VERSION >= 2096)
+# define ATTRIBUTE_MALLOC __attribute__ ((__malloc__))
+# else
+# define ATTRIBUTE_MALLOC
+# endif /* GNUC >= 2.96 */
+#endif /* ATTRIBUTE_MALLOC */
+
+/* Attributes on labels were valid as of gcc 2.93. */
+#ifndef ATTRIBUTE_UNUSED_LABEL
+# if (!defined (__cplusplus) && GCC_VERSION >= 2093)
+# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED
+# else
+# define ATTRIBUTE_UNUSED_LABEL
+# endif /* !__cplusplus && GNUC >= 2.93 */
+#endif /* ATTRIBUTE_UNUSED_LABEL */
+
+#ifndef ATTRIBUTE_UNUSED
+#define ATTRIBUTE_UNUSED __attribute__ ((__unused__))
+#endif /* ATTRIBUTE_UNUSED */
+
+/* Before GCC 3.4, the C++ frontend couldn't parse attributes placed after the
+ identifier name. */
+#if ! defined(__cplusplus) || (GCC_VERSION >= 3004)
+# define ARG_UNUSED(NAME) NAME ATTRIBUTE_UNUSED
+#else /* !__cplusplus || GNUC >= 3.4 */
+# define ARG_UNUSED(NAME) NAME
+#endif /* !__cplusplus || GNUC >= 3.4 */
+
+#ifndef ATTRIBUTE_NORETURN
+#define ATTRIBUTE_NORETURN __attribute__ ((__noreturn__))
+#endif /* ATTRIBUTE_NORETURN */
+
+/* Attribute `nonnull' was valid as of gcc 3.3. */
+#ifndef ATTRIBUTE_NONNULL
+# if (GCC_VERSION >= 3003)
+# define ATTRIBUTE_NONNULL(m) __attribute__ ((__nonnull__ (m)))
+# else
+# define ATTRIBUTE_NONNULL(m)
+# endif /* GNUC >= 3.3 */
+#endif /* ATTRIBUTE_NONNULL */
+
+/* Attribute `pure' was valid as of gcc 3.0. */
+#ifndef ATTRIBUTE_PURE
+# if (GCC_VERSION >= 3000)
+# define ATTRIBUTE_PURE __attribute__ ((__pure__))
+# else
+# define ATTRIBUTE_PURE
+# endif /* GNUC >= 3.0 */
+#endif /* ATTRIBUTE_PURE */
+
+/* Use ATTRIBUTE_PRINTF when the format specifier must not be NULL.
+ This was the case for the `printf' format attribute by itself
+ before GCC 3.3, but as of 3.3 we need to add the `nonnull'
+ attribute to retain this behavior. */
+#ifndef ATTRIBUTE_PRINTF
+#define ATTRIBUTE_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) ATTRIBUTE_NONNULL(m)
+#define ATTRIBUTE_PRINTF_1 ATTRIBUTE_PRINTF(1, 2)
+#define ATTRIBUTE_PRINTF_2 ATTRIBUTE_PRINTF(2, 3)
+#define ATTRIBUTE_PRINTF_3 ATTRIBUTE_PRINTF(3, 4)
+#define ATTRIBUTE_PRINTF_4 ATTRIBUTE_PRINTF(4, 5)
+#define ATTRIBUTE_PRINTF_5 ATTRIBUTE_PRINTF(5, 6)
+#endif /* ATTRIBUTE_PRINTF */
+
+/* Use ATTRIBUTE_FPTR_PRINTF when the format attribute is to be set on
+ a function pointer. Format attributes were allowed on function
+ pointers as of gcc 3.1. */
+#ifndef ATTRIBUTE_FPTR_PRINTF
+# if (GCC_VERSION >= 3001)
+# define ATTRIBUTE_FPTR_PRINTF(m, n) ATTRIBUTE_PRINTF(m, n)
+# else
+# define ATTRIBUTE_FPTR_PRINTF(m, n)
+# endif /* GNUC >= 3.1 */
+# define ATTRIBUTE_FPTR_PRINTF_1 ATTRIBUTE_FPTR_PRINTF(1, 2)
+# define ATTRIBUTE_FPTR_PRINTF_2 ATTRIBUTE_FPTR_PRINTF(2, 3)
+# define ATTRIBUTE_FPTR_PRINTF_3 ATTRIBUTE_FPTR_PRINTF(3, 4)
+# define ATTRIBUTE_FPTR_PRINTF_4 ATTRIBUTE_FPTR_PRINTF(4, 5)
+# define ATTRIBUTE_FPTR_PRINTF_5 ATTRIBUTE_FPTR_PRINTF(5, 6)
+#endif /* ATTRIBUTE_FPTR_PRINTF */
+
+/* Use ATTRIBUTE_NULL_PRINTF when the format specifier may be NULL. A
+ NULL format specifier was allowed as of gcc 3.3. */
+#ifndef ATTRIBUTE_NULL_PRINTF
+# if (GCC_VERSION >= 3003)
+# define ATTRIBUTE_NULL_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n)))
+# else
+# define ATTRIBUTE_NULL_PRINTF(m, n)
+# endif /* GNUC >= 3.3 */
+# define ATTRIBUTE_NULL_PRINTF_1 ATTRIBUTE_NULL_PRINTF(1, 2)
+# define ATTRIBUTE_NULL_PRINTF_2 ATTRIBUTE_NULL_PRINTF(2, 3)
+# define ATTRIBUTE_NULL_PRINTF_3 ATTRIBUTE_NULL_PRINTF(3, 4)
+# define ATTRIBUTE_NULL_PRINTF_4 ATTRIBUTE_NULL_PRINTF(4, 5)
+# define ATTRIBUTE_NULL_PRINTF_5 ATTRIBUTE_NULL_PRINTF(5, 6)
+#endif /* ATTRIBUTE_NULL_PRINTF */
+
+/* Attribute `sentinel' was valid as of gcc 3.5. */
+#ifndef ATTRIBUTE_SENTINEL
+# if (GCC_VERSION >= 3005)
+# define ATTRIBUTE_SENTINEL __attribute__ ((__sentinel__))
+# else
+# define ATTRIBUTE_SENTINEL
+# endif /* GNUC >= 3.5 */
+#endif /* ATTRIBUTE_SENTINEL */
+
+
+#ifndef ATTRIBUTE_ALIGNED_ALIGNOF
+# if (GCC_VERSION >= 3000)
+# define ATTRIBUTE_ALIGNED_ALIGNOF(m) __attribute__ ((__aligned__ (__alignof__ (m))))
+# else
+# define ATTRIBUTE_ALIGNED_ALIGNOF(m)
+# endif /* GNUC >= 3.0 */
+#endif /* ATTRIBUTE_ALIGNED_ALIGNOF */
+
+/* We use __extension__ in some places to suppress -pedantic warnings
+ about GCC extensions. This feature didn't work properly before
+ gcc 2.8. */
+#if GCC_VERSION < 2008
+#define __extension__
+#endif
+
+#endif /* ansidecl.h */
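
Illustrative use (not part of this patch) of the attribute wrappers defined
above: the annotations degrade to nothing on compilers that predate them, so
callers can apply them unconditionally. The include path and the report()
function here are hypothetical.

#include <stdarg.h>
#include <stdio.h>
#include "ansidecl.h"	/* illustrative path; the patch installs it as <asm/ansidecl.h> */

/* On gcc >= 3.3 this expands to the printf format attribute plus a
   nonnull check on the format argument; elsewhere it expands to nothing. */
static void report (const char *fmt, ...) ATTRIBUTE_PRINTF_1;

static void
report (const char *fmt, ...)
{
  va_list ap;
  VA_START (ap, fmt);	/* va_start (ap, fmt) under ANSI C */
  vfprintf (stderr, fmt, ap);
  va_end (ap);
}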
--- /dev/null
+++ b/arch/x86/include/asm/ansidecl_64.h
@@ -0,0 +1,383 @@
+/* ANSI and traditional C compatibility macros
+ Copyright 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use.
+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as
+ * required.
+ * Keith Owens <kaos@sgi.com> 15 May 2006
+ */
+
+/* ANSI and traditional C compatibility macros
+
+ ANSI C is assumed if __STDC__ is #defined.
+
+ Macro ANSI C definition Traditional C definition
+ ----- ---- - ---------- ----------- - ----------
+ ANSI_PROTOTYPES 1 not defined
+ PTR `void *' `char *'
+ PTRCONST `void *const' `char *'
+ LONG_DOUBLE `long double' `double'
+ const not defined `'
+ volatile not defined `'
+ signed not defined `'
+ VA_START(ap, var) va_start(ap, var) va_start(ap)
+
+ Note that it is safe to write "void foo();" indicating a function
+ with no return value, in all K+R compilers we have been able to test.
+
+ For declaring functions with prototypes, we also provide these:
+
+ PARAMS ((prototype))
+ -- for functions which take a fixed number of arguments. Use this
+ when declaring the function. When defining the function, write a
+ K+R style argument list. For example:
+
+ char *strcpy PARAMS ((char *dest, char *source));
+ ...
+ char *
+ strcpy (dest, source)
+ char *dest;
+ char *source;
+ { ... }
+
+
+ VPARAMS ((prototype, ...))
+ -- for functions which take a variable number of arguments. Use
+ PARAMS to declare the function, VPARAMS to define it. For example:
+
+ int printf PARAMS ((const char *format, ...));
+ ...
+ int
+ printf VPARAMS ((const char *format, ...))
+ {
+ ...
+ }
+
+ For writing functions which take variable numbers of arguments, we
+ also provide the VA_OPEN, VA_CLOSE, and VA_FIXEDARG macros. These
+ hide the differences between K+R <varargs.h> and C89 <stdarg.h> more
+ thoroughly than the simple VA_START() macro mentioned above.
+
+ VA_OPEN and VA_CLOSE are used *instead of* va_start and va_end.
+ Immediately after VA_OPEN, put a sequence of VA_FIXEDARG calls
+ corresponding to the list of fixed arguments. Then use va_arg
+ normally to get the variable arguments, or pass your va_list object
+ around. You do not declare the va_list yourself; VA_OPEN does it
+ for you.
+
+ Here is a complete example:
+
+ int
+ printf VPARAMS ((const char *format, ...))
+ {
+ int result;
+
+ VA_OPEN (ap, format);
+ VA_FIXEDARG (ap, const char *, format);
+
+ result = vfprintf (stdout, format, ap);
+ VA_CLOSE (ap);
+
+ return result;
+ }
+
+
+ You can declare variables either before or after the VA_OPEN,
+ VA_FIXEDARG sequence. Also, VA_OPEN and VA_CLOSE are the beginning
+ and end of a block. They must appear at the same nesting level,
+ and any variables declared after VA_OPEN go out of scope at
+ VA_CLOSE. Unfortunately, with a K+R compiler, that includes the
+ argument list. You can have multiple instances of VA_OPEN/VA_CLOSE
+ pairs in a single function in case you need to traverse the
+ argument list more than once.
+
+ For ease of writing code which uses GCC extensions but needs to be
+ portable to other compilers, we provide the GCC_VERSION macro that
+ simplifies testing __GNUC__ and __GNUC_MINOR__ together, and various
+ wrappers around __attribute__. Also, __extension__ will be #defined
+ to nothing if it doesn't work. See below.
+
+ This header also defines a lot of obsolete macros:
+ CONST, VOLATILE, SIGNED, PROTO, EXFUN, DEFUN, DEFUN_VOID,
+ AND, DOTS, NOARGS. Don't use them. */
+
+#ifndef _ANSIDECL_H
+#define _ANSIDECL_H 1
+
+/* Every source file includes this file,
+ so they will all get the switch for lint. */
+/* LINTLIBRARY */
+
+/* Using MACRO(x,y) in cpp #if conditionals does not work with some
+ older preprocessors. Thus we can't define something like this:
+
+#define HAVE_GCC_VERSION(MAJOR, MINOR) \
+ (__GNUC__ > (MAJOR) || (__GNUC__ == (MAJOR) && __GNUC_MINOR__ >= (MINOR)))
+
+and then test "#if HAVE_GCC_VERSION(2,7)".
+
+So instead we use the macro below and test it against specific values. */
+
+/* This macro simplifies testing whether we are using gcc, and if it
+ is of a particular minimum version. (Both major & minor numbers are
+ significant.) This macro will evaluate to 0 if we are not using
+ gcc at all. */
+#ifndef GCC_VERSION
+#define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__)
+#endif /* GCC_VERSION */
+
+#if defined (__STDC__) || defined (_AIX) || (defined (__mips) && defined (_SYSTYPE_SVR4)) || defined(_WIN32) || (defined(__alpha) && defined(__cplusplus))
+/* All known AIX compilers implement these things (but don't always
+ define __STDC__). The RISC/OS MIPS compiler defines these things
+ in SVR4 mode, but does not define __STDC__. */
+/* eraxxon@alumni.rice.edu: The Compaq C++ compiler, unlike many other
+ C++ compilers, does not define __STDC__, though it acts as if this
+ was so. (Verified versions: 5.7, 6.2, 6.3, 6.5) */
+
+#define ANSI_PROTOTYPES 1
+#define PTR void *
+#define PTRCONST void *const
+#define LONG_DOUBLE long double
+
+/* PARAMS is often defined elsewhere (e.g. by libintl.h), so wrap it in
+ a #ifndef. */
+//#ifndef PARAMS
+//#define PARAMS(ARGS) ARGS
+//#endif
+
+#define VPARAMS(ARGS) ARGS
+#define VA_START(VA_LIST, VAR) va_start(VA_LIST, VAR)
+
+/* variadic function helper macros */
+/* "struct Qdmy" swallows the semicolon after VA_OPEN/VA_FIXEDARG's
+ use without inhibiting further decls and without declaring an
+ actual variable. */
+#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP, VAR); { struct Qdmy
+#define VA_CLOSE(AP) } va_end(AP); }
+#define VA_FIXEDARG(AP, T, N) struct Qdmy
+
+#undef const
+#undef volatile
+#undef signed
+
+#ifdef __KERNEL__
+#ifndef __STDC_VERSION__
+#define __STDC_VERSION__ 0
+#endif
+#endif /* __KERNEL__ */
+
+/* inline requires special treatment; it's in C99, and GCC >=2.7 supports
+ it too, but it's not in C89. */
+#undef inline
+#if __STDC_VERSION__ > 199901L
+/* it's a keyword */
+#else
+# if GCC_VERSION >= 2007
+# define inline __inline__ /* __inline__ prevents -pedantic warnings */
+# else
+# define inline /* nothing */
+# endif
+#endif
+
+/* These are obsolete. Do not use. */
+#ifndef IN_GCC
+#define CONST const
+#define VOLATILE volatile
+#define SIGNED signed
+
+#define PROTO(type, name, arglist) type name arglist
+#define EXFUN(name, proto) name proto
+#define DEFUN(name, arglist, args) name(args)
+#define DEFUN_VOID(name) name(void)
+#define AND ,
+#define DOTS , ...
+#define NOARGS void
+#endif /* ! IN_GCC */
+
+#else /* Not ANSI C. */
+
+#undef ANSI_PROTOTYPES
+#define PTR char *
+#define PTRCONST PTR
+#define LONG_DOUBLE double
+
+//#define PARAMS(args) ()
+#define VPARAMS(args) (va_alist) va_dcl
+#define VA_START(va_list, var) va_start(va_list)
+
+#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP); { struct Qdmy
+#define VA_CLOSE(AP) } va_end(AP); }
+#define VA_FIXEDARG(AP, TYPE, NAME) TYPE NAME = va_arg(AP, TYPE)
+
+/* some systems define these in header files for non-ansi mode */
+#undef const
+#undef volatile
+#undef signed
+#undef inline
+#define const
+#define volatile
+#define signed
+#define inline
+
+#ifndef IN_GCC
+#define CONST
+#define VOLATILE
+#define SIGNED
+
+#define PROTO(type, name, arglist) type name ()
+#define EXFUN(name, proto) name()
+#define DEFUN(name, arglist, args) name arglist args;
+#define DEFUN_VOID(name) name()
+#define AND ;
+#define DOTS
+#define NOARGS
+#endif /* ! IN_GCC */
+
+#endif /* ANSI C. */
+
+/* Define macros for some gcc attributes. This permits us to use the
+ macros freely, and know that they will come into play for the
+ version of gcc in which they are supported. */
+
+#if (GCC_VERSION < 2007)
+# define __attribute__(x)
+#endif
+
+/* Attribute __malloc__ on functions was valid as of gcc 2.96. */
+#ifndef ATTRIBUTE_MALLOC
+# if (GCC_VERSION >= 2096)
+# define ATTRIBUTE_MALLOC __attribute__ ((__malloc__))
+# else
+# define ATTRIBUTE_MALLOC
+# endif /* GNUC >= 2.96 */
+#endif /* ATTRIBUTE_MALLOC */
+
+/* Attributes on labels were valid as of gcc 2.93. */
+#ifndef ATTRIBUTE_UNUSED_LABEL
+# if (!defined (__cplusplus) && GCC_VERSION >= 2093)
+# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED
+# else
+# define ATTRIBUTE_UNUSED_LABEL
+# endif /* !__cplusplus && GNUC >= 2.93 */
+#endif /* ATTRIBUTE_UNUSED_LABEL */
+
+#ifndef ATTRIBUTE_UNUSED
+#define ATTRIBUTE_UNUSED __attribute__ ((__unused__))
+#endif /* ATTRIBUTE_UNUSED */
+
+/* Before GCC 3.4, the C++ frontend couldn't parse attributes placed after the
+ identifier name. */
+#if ! defined(__cplusplus) || (GCC_VERSION >= 3004)
+# define ARG_UNUSED(NAME) NAME ATTRIBUTE_UNUSED
+#else /* !__cplusplus || GNUC >= 3.4 */
+# define ARG_UNUSED(NAME) NAME
+#endif /* !__cplusplus || GNUC >= 3.4 */
+
+#ifndef ATTRIBUTE_NORETURN
+#define ATTRIBUTE_NORETURN __attribute__ ((__noreturn__))
+#endif /* ATTRIBUTE_NORETURN */
+
+/* Attribute `nonnull' was valid as of gcc 3.3. */
+#ifndef ATTRIBUTE_NONNULL
+# if (GCC_VERSION >= 3003)
+# define ATTRIBUTE_NONNULL(m) __attribute__ ((__nonnull__ (m)))
+# else
+# define ATTRIBUTE_NONNULL(m)
+# endif /* GNUC >= 3.3 */
+#endif /* ATTRIBUTE_NONNULL */
+
+/* Attribute `pure' was valid as of gcc 3.0. */
+#ifndef ATTRIBUTE_PURE
+# if (GCC_VERSION >= 3000)
+# define ATTRIBUTE_PURE __attribute__ ((__pure__))
+# else
+# define ATTRIBUTE_PURE
+# endif /* GNUC >= 3.0 */
+#endif /* ATTRIBUTE_PURE */
+
+/* Use ATTRIBUTE_PRINTF when the format specifier must not be NULL.
+ This was the case for the `printf' format attribute by itself
+ before GCC 3.3, but as of 3.3 we need to add the `nonnull'
+ attribute to retain this behavior. */
+#ifndef ATTRIBUTE_PRINTF
+#define ATTRIBUTE_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) ATTRIBUTE_NONNULL(m)
+#define ATTRIBUTE_PRINTF_1 ATTRIBUTE_PRINTF(1, 2)
+#define ATTRIBUTE_PRINTF_2 ATTRIBUTE_PRINTF(2, 3)
+#define ATTRIBUTE_PRINTF_3 ATTRIBUTE_PRINTF(3, 4)
+#define ATTRIBUTE_PRINTF_4 ATTRIBUTE_PRINTF(4, 5)
+#define ATTRIBUTE_PRINTF_5 ATTRIBUTE_PRINTF(5, 6)
+#endif /* ATTRIBUTE_PRINTF */
+
+/* Use ATTRIBUTE_FPTR_PRINTF when the format attribute is to be set on
+ a function pointer. Format attributes were allowed on function
+ pointers as of gcc 3.1. */
+#ifndef ATTRIBUTE_FPTR_PRINTF
+# if (GCC_VERSION >= 3001)
+# define ATTRIBUTE_FPTR_PRINTF(m, n) ATTRIBUTE_PRINTF(m, n)
+# else
+# define ATTRIBUTE_FPTR_PRINTF(m, n)
+# endif /* GNUC >= 3.1 */
+# define ATTRIBUTE_FPTR_PRINTF_1 ATTRIBUTE_FPTR_PRINTF(1, 2)
+# define ATTRIBUTE_FPTR_PRINTF_2 ATTRIBUTE_FPTR_PRINTF(2, 3)
+# define ATTRIBUTE_FPTR_PRINTF_3 ATTRIBUTE_FPTR_PRINTF(3, 4)
+# define ATTRIBUTE_FPTR_PRINTF_4 ATTRIBUTE_FPTR_PRINTF(4, 5)
+# define ATTRIBUTE_FPTR_PRINTF_5 ATTRIBUTE_FPTR_PRINTF(5, 6)
+#endif /* ATTRIBUTE_FPTR_PRINTF */
+
+/* Use ATTRIBUTE_NULL_PRINTF when the format specifier may be NULL. A
+ NULL format specifier was allowed as of gcc 3.3. */
+#ifndef ATTRIBUTE_NULL_PRINTF
+# if (GCC_VERSION >= 3003)
+# define ATTRIBUTE_NULL_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n)))
+# else
+# define ATTRIBUTE_NULL_PRINTF(m, n)
+# endif /* GNUC >= 3.3 */
+# define ATTRIBUTE_NULL_PRINTF_1 ATTRIBUTE_NULL_PRINTF(1, 2)
+# define ATTRIBUTE_NULL_PRINTF_2 ATTRIBUTE_NULL_PRINTF(2, 3)
+# define ATTRIBUTE_NULL_PRINTF_3 ATTRIBUTE_NULL_PRINTF(3, 4)
+# define ATTRIBUTE_NULL_PRINTF_4 ATTRIBUTE_NULL_PRINTF(4, 5)
+# define ATTRIBUTE_NULL_PRINTF_5 ATTRIBUTE_NULL_PRINTF(5, 6)
+#endif /* ATTRIBUTE_NULL_PRINTF */
+
+/* Attribute `sentinel' was valid as of gcc 3.5. */
+#ifndef ATTRIBUTE_SENTINEL
+# if (GCC_VERSION >= 3005)
+# define ATTRIBUTE_SENTINEL __attribute__ ((__sentinel__))
+# else
+# define ATTRIBUTE_SENTINEL
+# endif /* GNUC >= 3.5 */
+#endif /* ATTRIBUTE_SENTINEL */
+
+
+#ifndef ATTRIBUTE_ALIGNED_ALIGNOF
+# if (GCC_VERSION >= 3000)
+# define ATTRIBUTE_ALIGNED_ALIGNOF(m) __attribute__ ((__aligned__ (__alignof__ (m))))
+# else
+# define ATTRIBUTE_ALIGNED_ALIGNOF(m)
+# endif /* GNUC >= 3.0 */
+#endif /* ATTRIBUTE_ALIGNED_ALIGNOF */
+
+/* We use __extension__ in some places to suppress -pedantic warnings
+ about GCC extensions. This feature didn't work properly before
+ gcc 2.8. */
+#if GCC_VERSION < 2008
+#define __extension__
+#endif
+
+#endif /* ansidecl.h */
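
This 64-bit copy carries the same machinery as ansidecl_32.h above; the
asm/ansidecl.h wrapper picks one at compile time. For readers of the many
version checks: GCC_VERSION packs major * 1000 + minor, so gcc 2.95 compares
as 2095 and gcc 3.4 as 3004. A hypothetical illustration (not patch code):

/* EXAMPLE_UNUSED is hypothetical; ATTRIBUTE_UNUSED is defined above. */
#if GCC_VERSION >= 3004
# define EXAMPLE_UNUSED(decl) decl ATTRIBUTE_UNUSED
#else
# define EXAMPLE_UNUSED(decl) decl
#endif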
--- /dev/null
+++ b/arch/x86/include/asm/bfd.h
@@ -0,0 +1,5 @@
+#ifdef CONFIG_X86_32
+# include "bfd_32.h"
+#else
+# include "bfd_64.h"
+#endif
--- /dev/null
+++ b/arch/x86/include/asm/bfd_32.h
@@ -0,0 +1,4921 @@
+/* DO NOT EDIT! -*- buffer-read-only: t -*- This file is automatically
+ generated from "bfd-in.h", "init.c", "opncls.c", "libbfd.c",
+ "bfdio.c", "bfdwin.c", "section.c", "archures.c", "reloc.c",
+ "syms.c", "bfd.c", "archive.c", "corefile.c", "targets.c", "format.c",
+ "linker.c" and "simple.c".
+ Run "make headers" in your build bfd/ to regenerate. */
+
+/* Main header file for the bfd library -- portable access to object files.
+
+ Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+
+ Contributed by Cygnus Support.
+
+ This file is part of BFD, the Binary File Descriptor library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use.
+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as
+ * required.
+ * Keith Owens <kaos@sgi.com> 15 May 2006
+ */
+
+#ifndef __BFD_H_SEEN__
+#define __BFD_H_SEEN__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __KERNEL__
+#include <asm/ansidecl.h>
+#else /* __KERNEL__ */
+#include "ansidecl.h"
+#include "symcat.h"
+#endif /* __KERNEL__ */
+#if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE)
+#ifndef SABER
+/* This hack is to avoid a problem with some strict ANSI C preprocessors.
+ The problem is, "32_" is not a valid preprocessing token, and we don't
+ want extra underscores (e.g., "nlm_32_"). The XCONCAT2 macro will
+ cause the inner CONCAT2 macros to be evaluated first, producing
+ still-valid pp-tokens. Then the final concatenation can be done. */
+#undef CONCAT4
+#define CONCAT4(a,b,c,d) XCONCAT2(CONCAT2(a,b),CONCAT2(c,d))
+#endif
+#endif
+
+/* The word size used by BFD on the host. This may be 64 with a 32
+ bit target if the host is 64 bit, or if other 64 bit targets have
+ been selected with --enable-targets, or if --enable-64-bit-bfd. */
+#ifdef __KERNEL__
+#define BFD_ARCH_SIZE 32
+#else /* __KERNEL__ */
+#define BFD_ARCH_SIZE 64
+#endif /* __KERNEL__ */
+
+/* The word size of the default bfd target. */
+#define BFD_DEFAULT_TARGET_SIZE 32
+
+#define BFD_HOST_64BIT_LONG 0
+#define BFD_HOST_LONG_LONG 1
+#if 1
+#define BFD_HOST_64_BIT long long
+#define BFD_HOST_U_64_BIT unsigned long long
+typedef BFD_HOST_64_BIT bfd_int64_t;
+typedef BFD_HOST_U_64_BIT bfd_uint64_t;
+#endif
+
+#if BFD_ARCH_SIZE >= 64
+#define BFD64
+#endif
+
+#ifndef INLINE
+#if __GNUC__ >= 2
+#define INLINE __inline__
+#else
+#define INLINE
+#endif
+#endif
+
+/* Forward declaration. */
+typedef struct bfd bfd;
+
+/* Boolean type used in bfd. Too many systems define their own
+ versions of "boolean" for us to safely typedef a "boolean" of
+ our own. Using an enum for "bfd_boolean" has its own set of
+ problems, with strange looking casts required to avoid warnings
+ on some older compilers. Thus we just use an int.
+
+ General rule: Functions which are bfd_boolean return TRUE on
+ success and FALSE on failure (unless they're a predicate). */
+
+typedef int bfd_boolean;
+#undef FALSE
+#undef TRUE
+#define FALSE 0
+#define TRUE 1
+
+#ifdef BFD64
+
+#ifndef BFD_HOST_64_BIT
+ #error No 64 bit integer type available
+#endif /* ! defined (BFD_HOST_64_BIT) */
+
+typedef BFD_HOST_U_64_BIT bfd_vma;
+typedef BFD_HOST_64_BIT bfd_signed_vma;
+typedef BFD_HOST_U_64_BIT bfd_size_type;
+typedef BFD_HOST_U_64_BIT symvalue;
+
+#ifndef fprintf_vma
+#if BFD_HOST_64BIT_LONG
+#define sprintf_vma(s,x) sprintf (s, "%016lx", x)
+#define fprintf_vma(f,x) fprintf (f, "%016lx", x)
+#else
+#define _bfd_int64_low(x) ((unsigned long) (((x) & 0xffffffff)))
+#define _bfd_int64_high(x) ((unsigned long) (((x) >> 32) & 0xffffffff))
+#define fprintf_vma(s,x) \
+ fprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x))
+#define sprintf_vma(s,x) \
+ sprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x))
+#endif
+#endif
+
+#else /* not BFD64 */
+
+/* Represent a target address. Also used as a generic unsigned type
+ which is guaranteed to be big enough to hold any arithmetic types
+ we need to deal with. */
+typedef unsigned long bfd_vma;
+
+/* A generic signed type which is guaranteed to be big enough to hold any
+ arithmetic types we need to deal with. Can be assumed to be compatible
+ with bfd_vma in the same way that signed and unsigned ints are compatible
+ (as parameters, in assignment, etc). */
+typedef long bfd_signed_vma;
+
+typedef unsigned long symvalue;
+typedef unsigned long bfd_size_type;
+
+/* Print a bfd_vma x on stream s. */
+#define fprintf_vma(s,x) fprintf (s, "%08lx", x)
+#define sprintf_vma(s,x) sprintf (s, "%08lx", x)
+
+#endif /* not BFD64 */
+
+#define HALF_BFD_SIZE_TYPE \
+ (((bfd_size_type) 1) << (8 * sizeof (bfd_size_type) / 2))
+
+#ifndef BFD_HOST_64_BIT
+/* Fall back on a 32 bit type. The idea is to make these types always
+ available for function return types, but in the case that
+ BFD_HOST_64_BIT is undefined such a function should abort or
+ otherwise signal an error. */
+typedef bfd_signed_vma bfd_int64_t;
+typedef bfd_vma bfd_uint64_t;
+#endif
+
+/* An offset into a file. BFD always uses the largest possible offset
+ based on the build time availability of fseek, fseeko, or fseeko64. */
+typedef BFD_HOST_64_BIT file_ptr;
+typedef unsigned BFD_HOST_64_BIT ufile_ptr;
+
+extern void bfd_sprintf_vma (bfd *, char *, bfd_vma);
+extern void bfd_fprintf_vma (bfd *, void *, bfd_vma);
+
+#define printf_vma(x) fprintf_vma(stdout,x)
+#define bfd_printf_vma(abfd,x) bfd_fprintf_vma (abfd,stdout,x)
+
+typedef unsigned int flagword; /* 32 bits of flags */
+typedef unsigned char bfd_byte;
+
+/* File formats. */
+
+typedef enum bfd_format
+{
+ bfd_unknown = 0, /* File format is unknown. */
+ bfd_object, /* Linker/assembler/compiler output. */
+ bfd_archive, /* Object archive file. */
+ bfd_core, /* Core dump. */
+ bfd_type_end /* Marks the end; don't use it! */
+}
+bfd_format;
+
+/* Values that may appear in the flags field of a BFD. These also
+ appear in the object_flags field of the bfd_target structure, where
+ they indicate the set of flags used by that backend (not all flags
+ are meaningful for all object file formats) (FIXME: at the moment,
+ the object_flags values have mostly just been copied from one backend
+ to another, and are not necessarily correct). */
+
+/* No flags. */
+#define BFD_NO_FLAGS 0x00
+
+/* BFD contains relocation entries. */
+#define HAS_RELOC 0x01
+
+/* BFD is directly executable. */
+#define EXEC_P 0x02
+
+/* BFD has line number information (basically used for F_LNNO in a
+ COFF header). */
+#define HAS_LINENO 0x04
+
+/* BFD has debugging information. */
+#define HAS_DEBUG 0x08
+
+/* BFD has symbols. */
+#define HAS_SYMS 0x10
+
+/* BFD has local symbols (basically used for F_LSYMS in a COFF
+ header). */
+#define HAS_LOCALS 0x20
+
+/* BFD is a dynamic object. */
+#define DYNAMIC 0x40
+
+/* Text section is write protected (if D_PAGED is not set, this is
+ like an a.out NMAGIC file) (the linker sets this by default, but
+ clears it for -r or -N). */
+#define WP_TEXT 0x80
+
+/* BFD is dynamically paged (this is like an a.out ZMAGIC file) (the
+ linker sets this by default, but clears it for -r or -n or -N). */
+#define D_PAGED 0x100
+
+/* BFD is relaxable (this means that bfd_relax_section may be able to
+ do something) (sometimes bfd_relax_section can do something even if
+ this is not set). */
+#define BFD_IS_RELAXABLE 0x200
+
+/* This may be set before writing out a BFD to request using a
+ traditional format. For example, this is used to request that when
+ writing out an a.out object the symbols not be hashed to eliminate
+ duplicates. */
+#define BFD_TRADITIONAL_FORMAT 0x400
+
+/* This flag indicates that the BFD contents are actually cached in
+ memory. If this is set, iostream points to a bfd_in_memory struct. */
+#define BFD_IN_MEMORY 0x800
+
+/* The sections in this BFD specify a memory page. */
+#define HAS_LOAD_PAGE 0x1000
+
+/* This BFD has been created by the linker and doesn't correspond
+ to any input file. */
+#define BFD_LINKER_CREATED 0x2000
+
+/* Symbols and relocation. */
+
+/* A count of carsyms (canonical archive symbols). */
+typedef unsigned long symindex;
+
+/* How to perform a relocation. */
+typedef const struct reloc_howto_struct reloc_howto_type;
+
+#define BFD_NO_MORE_SYMBOLS ((symindex) ~0)
+
+/* General purpose part of a symbol X;
+ target specific parts are in libcoff.h, libaout.h, etc. */
+
+#define bfd_get_section(x) ((x)->section)
+#define bfd_get_output_section(x) ((x)->section->output_section)
+#define bfd_set_section(x,y) ((x)->section) = (y)
+#define bfd_asymbol_base(x) ((x)->section->vma)
+#define bfd_asymbol_value(x) (bfd_asymbol_base(x) + (x)->value)
+#define bfd_asymbol_name(x) ((x)->name)
+/*Perhaps future: #define bfd_asymbol_bfd(x) ((x)->section->owner)*/
+#define bfd_asymbol_bfd(x) ((x)->the_bfd)
+#define bfd_asymbol_flavour(x) (bfd_asymbol_bfd(x)->xvec->flavour)
+
+/* A canonical archive symbol. */
+/* This is a type pun with struct ranlib on purpose! */
+typedef struct carsym
+{
+ char *name;
+ file_ptr file_offset; /* Look here to find the file. */
+}
+carsym; /* To make these you call a carsymogen. */
+
+/* Used in generating armaps (archive tables of contents).
+ Perhaps just a forward definition would do? */
+struct orl /* Output ranlib. */
+{
+ char **name; /* Symbol name. */
+ union
+ {
+ file_ptr pos;
+ bfd *abfd;
+ } u; /* bfd* or file position. */
+ int namidx; /* Index into string table. */
+};
+
+/* Linenumber stuff. */
+typedef struct lineno_cache_entry
+{
+ unsigned int line_number; /* Linenumber from start of function. */
+ union
+ {
+ struct bfd_symbol *sym; /* Function name. */
+ bfd_vma offset; /* Offset into section. */
+ } u;
+}
+alent;
+
+/* Object and core file sections. */
+
+#define align_power(addr, align) \
+ (((addr) + ((bfd_vma) 1 << (align)) - 1) & ((bfd_vma) -1 << (align)))
+
+typedef struct bfd_section *sec_ptr;
+
+#define bfd_get_section_name(bfd, ptr) ((ptr)->name + 0)
+#define bfd_get_section_vma(bfd, ptr) ((ptr)->vma + 0)
+#define bfd_get_section_lma(bfd, ptr) ((ptr)->lma + 0)
+#define bfd_get_section_alignment(bfd, ptr) ((ptr)->alignment_power + 0)
+#define bfd_section_name(bfd, ptr) ((ptr)->name)
+#define bfd_section_size(bfd, ptr) ((ptr)->size)
+#define bfd_get_section_size(ptr) ((ptr)->size)
+#define bfd_section_vma(bfd, ptr) ((ptr)->vma)
+#define bfd_section_lma(bfd, ptr) ((ptr)->lma)
+#define bfd_section_alignment(bfd, ptr) ((ptr)->alignment_power)
+#define bfd_get_section_flags(bfd, ptr) ((ptr)->flags + 0)
+#define bfd_get_section_userdata(bfd, ptr) ((ptr)->userdata)
+
+#define bfd_is_com_section(ptr) (((ptr)->flags & SEC_IS_COMMON) != 0)
+
+#define bfd_set_section_vma(bfd, ptr, val) (((ptr)->vma = (ptr)->lma = (val)), ((ptr)->user_set_vma = TRUE), TRUE)
+#define bfd_set_section_alignment(bfd, ptr, val) (((ptr)->alignment_power = (val)),TRUE)
+#define bfd_set_section_userdata(bfd, ptr, val) (((ptr)->userdata = (val)),TRUE)
+/* Find the address one past the end of SEC. */
+#define bfd_get_section_limit(bfd, sec) \
+ (((sec)->rawsize ? (sec)->rawsize : (sec)->size) \
+ / bfd_octets_per_byte (bfd))
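/* Illustrative aside, not part of this patch: align_power above rounds
   a value up to a power-of-two boundary, with the alignment given as a
   log2 exponent, e.g.:

	align_power ((bfd_vma) 0x1003, 4) == 0x1010;

   i.e. 0x1003 rounded up to the next 16-byte (1 << 4) boundary.  */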
+
+typedef struct stat stat_type;
+
+typedef enum bfd_print_symbol
+{
+ bfd_print_symbol_name,
+ bfd_print_symbol_more,
+ bfd_print_symbol_all
+} bfd_print_symbol_type;
+
+/* Information about a symbol that nm needs. */
+
+typedef struct _symbol_info
+{
+ symvalue value;
+ char type;
+ const char *name; /* Symbol name. */
+ unsigned char stab_type; /* Stab type. */
+ char stab_other; /* Stab other. */
+ short stab_desc; /* Stab desc. */
+ const char *stab_name; /* String for stab type. */
+} symbol_info;
+
+/* Get the name of a stabs type code. */
+
+extern const char *bfd_get_stab_name (int);
+
+/* Hash table routines. There is no way to free up a hash table. */
+
+/* An element in the hash table. Most uses will actually use a larger
+ structure, and an instance of this will be the first field. */
+
+struct bfd_hash_entry
+{
+ /* Next entry for this hash code. */
+ struct bfd_hash_entry *next;
+ /* String being hashed. */
+ const char *string;
+ /* Hash code. This is the full hash code, not the index into the
+ table. */
+ unsigned long hash;
+};
+
+/* A hash table. */
+
+struct bfd_hash_table
+{
+ /* The hash array. */
+ struct bfd_hash_entry **table;
+ /* The number of slots in the hash table. */
+ unsigned int size;
+ /* A function used to create new elements in the hash table. The
+ first entry is itself a pointer to an element. When this
+ function is first invoked, this pointer will be NULL. However,
+ having the pointer permits a hierarchy of method functions to be
+ built each of which calls the function in the superclass. Thus
+ each function should be written to allocate a new block of memory
+ only if the argument is NULL. */
+ struct bfd_hash_entry *(*newfunc)
+ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *);
+ /* An objalloc for this hash table. This is a struct objalloc *,
+ but we use void * to avoid requiring the inclusion of objalloc.h. */
+ void *memory;
+};
+
+/* Initialize a hash table. */
+extern bfd_boolean bfd_hash_table_init
+ (struct bfd_hash_table *,
+ struct bfd_hash_entry *(*) (struct bfd_hash_entry *,
+ struct bfd_hash_table *,
+ const char *));
+
+/* Initialize a hash table specifying a size. */
+extern bfd_boolean bfd_hash_table_init_n
+ (struct bfd_hash_table *,
+ struct bfd_hash_entry *(*) (struct bfd_hash_entry *,
+ struct bfd_hash_table *,
+ const char *),
+ unsigned int size);
+
+/* Free up a hash table. */
+extern void bfd_hash_table_free
+ (struct bfd_hash_table *);
+
+/* Look up a string in a hash table. If CREATE is TRUE, a new entry
+ will be created for this string if one does not already exist. The
+ COPY argument must be TRUE if this routine should copy the string
+ into newly allocated memory when adding an entry. */
+extern struct bfd_hash_entry *bfd_hash_lookup
+ (struct bfd_hash_table *, const char *, bfd_boolean create,
+ bfd_boolean copy);
+
+/* Replace an entry in a hash table. */
+extern void bfd_hash_replace
+ (struct bfd_hash_table *, struct bfd_hash_entry *old,
+ struct bfd_hash_entry *nw);
+
+/* Base method for creating a hash table entry. */
+extern struct bfd_hash_entry *bfd_hash_newfunc
+ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *);
+
+/* Grab some space for a hash table entry. */
+extern void *bfd_hash_allocate
+ (struct bfd_hash_table *, unsigned int);
+
+/* Traverse a hash table in a random order, calling a function on each
+ element. If the function returns FALSE, the traversal stops. The
+ INFO argument is passed to the function. */
+extern void bfd_hash_traverse
+ (struct bfd_hash_table *,
+ bfd_boolean (*) (struct bfd_hash_entry *, void *),
+ void *info);
+
+/* Allows the default size of a hash table to be configured. New hash
+ tables allocated using bfd_hash_table_init will be created with
+ this size. */
+extern void bfd_hash_set_default_size (bfd_size_type);
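/* Illustrative aside, not part of this patch: typical use of the hash
   table API declared above.  A client embeds struct bfd_hash_entry as
   the *first* field of its own entry type, supplies a newfunc that
   allocates the derived type, and lets bfd_hash_newfunc initialize the
   base fields.  struct my_entry and my_newfunc are hypothetical.  */
struct my_entry
{
  struct bfd_hash_entry root;	/* must come first */
  int seen;
};

static struct bfd_hash_entry *
my_newfunc (struct bfd_hash_entry *entry, struct bfd_hash_table *table,
	    const char *string)
{
  if (entry == NULL)
    entry = bfd_hash_allocate (table, sizeof (struct my_entry));
  if (entry == NULL)
    return NULL;
  entry = bfd_hash_newfunc (entry, table, string);	/* base class init */
  if (entry != NULL)
    ((struct my_entry *) entry)->seen = 0;
  return entry;
}
/* After bfd_hash_table_init (&table, my_newfunc), a call such as
   bfd_hash_lookup (&table, "name", TRUE, TRUE) finds or creates the
   entry, copying the key into table-owned memory because COPY is TRUE.  */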
+
+/* This structure is used to keep track of stabs in sections
+ information while linking. */
+
+struct stab_info
+{
+ /* A hash table used to hold stabs strings. */
+ struct bfd_strtab_hash *strings;
+ /* The header file hash table. */
+ struct bfd_hash_table includes;
+ /* The first .stabstr section. */
+ struct bfd_section *stabstr;
+};
+
+#define COFF_SWAP_TABLE (void *) &bfd_coff_std_swap_table
+
+/* User program access to BFD facilities. */
+
+/* Direct I/O routines, for programs which know more about the object
+ file than BFD does. Use higher level routines if possible. */
+
+extern bfd_size_type bfd_bread (void *, bfd_size_type, bfd *);
+extern bfd_size_type bfd_bwrite (const void *, bfd_size_type, bfd *);
+extern int bfd_seek (bfd *, file_ptr, int);
+extern file_ptr bfd_tell (bfd *);
+extern int bfd_flush (bfd *);
+extern int bfd_stat (bfd *, struct stat *);
+
+/* Deprecated old routines. */
+#if __GNUC__
+#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \
+ (warn_deprecated ("bfd_read", __FILE__, __LINE__, __FUNCTION__), \
+ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
+#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \
+ (warn_deprecated ("bfd_write", __FILE__, __LINE__, __FUNCTION__), \
+ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
+#else
+#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \
+ (warn_deprecated ("bfd_read", (const char *) 0, 0, (const char *) 0), \
+ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
+#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \
+ (warn_deprecated ("bfd_write", (const char *) 0, 0, (const char *) 0),\
+ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
+#endif
+extern void warn_deprecated (const char *, const char *, int, const char *);
+
+/* Cast from const char * to char * so that caller can assign to
+ a char * without a warning. */
+#define bfd_get_filename(abfd) ((char *) (abfd)->filename)
+#define bfd_get_cacheable(abfd) ((abfd)->cacheable)
+#define bfd_get_format(abfd) ((abfd)->format)
+#define bfd_get_target(abfd) ((abfd)->xvec->name)
+#define bfd_get_flavour(abfd) ((abfd)->xvec->flavour)
+#define bfd_family_coff(abfd) \
+ (bfd_get_flavour (abfd) == bfd_target_coff_flavour || \
+ bfd_get_flavour (abfd) == bfd_target_xcoff_flavour)
+#define bfd_big_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG)
+#define bfd_little_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_LITTLE)
+#define bfd_header_big_endian(abfd) \
+ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_BIG)
+#define bfd_header_little_endian(abfd) \
+ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_LITTLE)
+#define bfd_get_file_flags(abfd) ((abfd)->flags)
+#define bfd_applicable_file_flags(abfd) ((abfd)->xvec->object_flags)
+#define bfd_applicable_section_flags(abfd) ((abfd)->xvec->section_flags)
+#define bfd_my_archive(abfd) ((abfd)->my_archive)
+#define bfd_has_map(abfd) ((abfd)->has_armap)
+
+#define bfd_valid_reloc_types(abfd) ((abfd)->xvec->valid_reloc_types)
+#define bfd_usrdata(abfd) ((abfd)->usrdata)
+
+#define bfd_get_start_address(abfd) ((abfd)->start_address)
+#define bfd_get_symcount(abfd) ((abfd)->symcount)
+#define bfd_get_outsymbols(abfd) ((abfd)->outsymbols)
+#define bfd_count_sections(abfd) ((abfd)->section_count)
+
+#define bfd_get_dynamic_symcount(abfd) ((abfd)->dynsymcount)
+
+#define bfd_get_symbol_leading_char(abfd) ((abfd)->xvec->symbol_leading_char)
+
+#define bfd_set_cacheable(abfd,bool) (((abfd)->cacheable = bool), TRUE)
+
+extern bfd_boolean bfd_cache_close
+ (bfd *abfd);
+/* NB: This declaration should match the autogenerated one in libbfd.h. */
+
+extern bfd_boolean bfd_cache_close_all (void);
+
+extern bfd_boolean bfd_record_phdr
+ (bfd *, unsigned long, bfd_boolean, flagword, bfd_boolean, bfd_vma,
+ bfd_boolean, bfd_boolean, unsigned int, struct bfd_section **);
+
+/* Byte swapping routines. */
+
+bfd_uint64_t bfd_getb64 (const void *);
+bfd_uint64_t bfd_getl64 (const void *);
+bfd_int64_t bfd_getb_signed_64 (const void *);
+bfd_int64_t bfd_getl_signed_64 (const void *);
+bfd_vma bfd_getb32 (const void *);
+bfd_vma bfd_getl32 (const void *);
+bfd_signed_vma bfd_getb_signed_32 (const void *);
+bfd_signed_vma bfd_getl_signed_32 (const void *);
+bfd_vma bfd_getb16 (const void *);
+bfd_vma bfd_getl16 (const void *);
+bfd_signed_vma bfd_getb_signed_16 (const void *);
+bfd_signed_vma bfd_getl_signed_16 (const void *);
+void bfd_putb64 (bfd_uint64_t, void *);
+void bfd_putl64 (bfd_uint64_t, void *);
+void bfd_putb32 (bfd_vma, void *);
+void bfd_putl32 (bfd_vma, void *);
+void bfd_putb16 (bfd_vma, void *);
+void bfd_putl16 (bfd_vma, void *);
+
+/* Byte swapping routines which take size and endianness as arguments. */
+
+bfd_uint64_t bfd_get_bits (const void *, int, bfd_boolean);
+void bfd_put_bits (bfd_uint64_t, void *, int, bfd_boolean);
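/* Illustrative aside, not part of this patch: the accessors above read
   and write integers of a fixed endianness regardless of the host, so a
   little-endian field decodes identically everywhere.
   read_le32_example is hypothetical.  */
static bfd_vma
read_le32_example (const bfd_byte buf[4])
{
  /* Bytes 0x12 0x34 0x56 0x78 decode as 0x78563412: byte 0 is the
     least significant byte in the little-endian routines.  */
  return bfd_getl32 (buf);
}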
+
+extern bfd_boolean bfd_section_already_linked_table_init (void);
+extern void bfd_section_already_linked_table_free (void);
+
+/* Externally visible ECOFF routines. */
+
+#if defined(__STDC__) || defined(ALMOST_STDC)
+struct ecoff_debug_info;
+struct ecoff_debug_swap;
+struct ecoff_extr;
+struct bfd_symbol;
+struct bfd_link_info;
+struct bfd_link_hash_entry;
+struct bfd_elf_version_tree;
+#endif
+extern bfd_vma bfd_ecoff_get_gp_value
+ (bfd * abfd);
+extern bfd_boolean bfd_ecoff_set_gp_value
+ (bfd *abfd, bfd_vma gp_value);
+extern bfd_boolean bfd_ecoff_set_regmasks
+ (bfd *abfd, unsigned long gprmask, unsigned long fprmask,
+ unsigned long *cprmask);
+extern void *bfd_ecoff_debug_init
+ (bfd *output_bfd, struct ecoff_debug_info *output_debug,
+ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *);
+extern void bfd_ecoff_debug_free
+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
+ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *);
+extern bfd_boolean bfd_ecoff_debug_accumulate
+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
+ const struct ecoff_debug_swap *output_swap, bfd *input_bfd,
+ struct ecoff_debug_info *input_debug,
+ const struct ecoff_debug_swap *input_swap, struct bfd_link_info *);
+extern bfd_boolean bfd_ecoff_debug_accumulate_other
+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
+ const struct ecoff_debug_swap *output_swap, bfd *input_bfd,
+ struct bfd_link_info *);
+extern bfd_boolean bfd_ecoff_debug_externals
+ (bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap, bfd_boolean relocatable,
+ bfd_boolean (*get_extr) (struct bfd_symbol *, struct ecoff_extr *),
+ void (*set_index) (struct bfd_symbol *, bfd_size_type));
+extern bfd_boolean bfd_ecoff_debug_one_external
+ (bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap, const char *name,
+ struct ecoff_extr *esym);
+extern bfd_size_type bfd_ecoff_debug_size
+ (bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap);
+extern bfd_boolean bfd_ecoff_write_debug
+ (bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap, file_ptr where);
+extern bfd_boolean bfd_ecoff_write_accumulated_debug
+ (void *handle, bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap,
+ struct bfd_link_info *info, file_ptr where);
+
+/* Externally visible ELF routines. */
+
+struct bfd_link_needed_list
+{
+ struct bfd_link_needed_list *next;
+ bfd *by;
+ const char *name;
+};
+
+enum dynamic_lib_link_class {
+ DYN_NORMAL = 0,
+ DYN_AS_NEEDED = 1,
+ DYN_DT_NEEDED = 2,
+ DYN_NO_ADD_NEEDED = 4,
+ DYN_NO_NEEDED = 8
+};
+
+extern bfd_boolean bfd_elf_record_link_assignment
+ (struct bfd_link_info *, const char *, bfd_boolean);
+extern struct bfd_link_needed_list *bfd_elf_get_needed_list
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_elf_get_bfd_needed_list
+ (bfd *, struct bfd_link_needed_list **);
+extern bfd_boolean bfd_elf_size_dynamic_sections
+ (bfd *, const char *, const char *, const char *, const char * const *,
+ struct bfd_link_info *, struct bfd_section **,
+ struct bfd_elf_version_tree *);
+extern bfd_boolean bfd_elf_size_dynsym_hash_dynstr
+ (bfd *, struct bfd_link_info *);
+extern void bfd_elf_set_dt_needed_name
+ (bfd *, const char *);
+extern const char *bfd_elf_get_dt_soname
+ (bfd *);
+extern void bfd_elf_set_dyn_lib_class
+ (bfd *, int);
+extern int bfd_elf_get_dyn_lib_class
+ (bfd *);
+extern struct bfd_link_needed_list *bfd_elf_get_runpath_list
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_elf_discard_info
+ (bfd *, struct bfd_link_info *);
+extern unsigned int _bfd_elf_default_action_discarded
+ (struct bfd_section *);
+
+/* Return an upper bound on the number of bytes required to store a
+ copy of ABFD's program header table entries. Return -1 if an error
+ occurs; bfd_get_error will return an appropriate code. */
+extern long bfd_get_elf_phdr_upper_bound
+ (bfd *abfd);
+
+/* Copy ABFD's program header table entries to *PHDRS. The entries
+ will be stored as an array of Elf_Internal_Phdr structures, as
+ defined in include/elf/internal.h. To find out how large the
+ buffer needs to be, call bfd_get_elf_phdr_upper_bound.
+
+ Return the number of program header table entries read, or -1 if an
+ error occurs; bfd_get_error will return an appropriate code. */
+extern int bfd_get_elf_phdrs
+ (bfd *abfd, void *phdrs);
+
+/* Create a new BFD as if by bfd_openr. Rather than opening a file,
+ reconstruct an ELF file by reading the segments out of remote memory
+ based on the ELF file header at EHDR_VMA and the ELF program headers it
+ points to. If not null, *LOADBASEP is filled in with the difference
+ between the VMAs from which the segments were read, and the VMAs the
+ file headers (and hence BFD's idea of each section's VMA) put them at.
+
+ The function TARGET_READ_MEMORY is called to copy LEN bytes from the
+ remote memory at target address VMA into the local buffer at MYADDR; it
+ should return zero on success or an `errno' code on failure. TEMPL must
+ be a BFD for an ELF target with the word size and byte order found in
+ the remote memory. */
+extern bfd *bfd_elf_bfd_from_remote_memory
+ (bfd *templ, bfd_vma ehdr_vma, bfd_vma *loadbasep,
+ int (*target_read_memory) (bfd_vma vma, bfd_byte *myaddr, int len));
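+
+/* Illustrative sketch (disabled) of a TARGET_READ_MEMORY callback
+ matching the contract described above: return zero on success, an
+ errno code on failure. kdb_read_mem is a hypothetical transport,
+ and EIO assumes <errno.h>. */
+#if 0
+static int
+example_read_memory (bfd_vma vma, bfd_byte *myaddr, int len)
+{
+  /* Copy LEN bytes of remote memory at VMA into MYADDR.  */
+  if (kdb_read_mem (vma, myaddr, len) != 0)
+    return EIO;                 /* errno code on failure */
+  return 0;                     /* zero on success */
+}
+#endif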
+
+/* Return the arch_size field of an elf bfd, or -1 if not elf. */
+extern int bfd_get_arch_size
+ (bfd *);
+
+/* Return 1 if the target's addresses "naturally" sign extend, 0 if
+ they do not, or -1 if not elf. */
+extern int bfd_get_sign_extend_vma
+ (bfd *);
+
+extern struct bfd_section *_bfd_elf_tls_setup
+ (bfd *, struct bfd_link_info *);
+
+extern void _bfd_elf_provide_symbol
+ (struct bfd_link_info *, const char *, bfd_vma, struct bfd_section *);
+
+extern void _bfd_elf_provide_section_bound_symbols
+ (struct bfd_link_info *, struct bfd_section *, const char *, const char *);
+
+extern void _bfd_elf_fix_excluded_sec_syms
+ (bfd *, struct bfd_link_info *);
+
+extern bfd_boolean bfd_m68k_elf32_create_embedded_relocs
+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *,
+ char **);
+
+/* SunOS shared library support routines for the linker. */
+
+extern struct bfd_link_needed_list *bfd_sunos_get_needed_list
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_sunos_record_link_assignment
+ (bfd *, struct bfd_link_info *, const char *);
+extern bfd_boolean bfd_sunos_size_dynamic_sections
+ (bfd *, struct bfd_link_info *, struct bfd_section **,
+ struct bfd_section **, struct bfd_section **);
+
+/* Linux shared library support routines for the linker. */
+
+extern bfd_boolean bfd_i386linux_size_dynamic_sections
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_m68klinux_size_dynamic_sections
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_sparclinux_size_dynamic_sections
+ (bfd *, struct bfd_link_info *);
+
+/* mmap hacks */
+
+struct _bfd_window_internal;
+typedef struct _bfd_window_internal bfd_window_internal;
+
+typedef struct _bfd_window
+{
+ /* What the user asked for. */
+ void *data;
+ bfd_size_type size;
+ /* The actual window used by BFD. Small user-requested read-only
+ regions sharing a page may share a single window into the object
+ file. Read-write versions shouldn't until I've fixed things to
+ keep track of which portions have been claimed by the
+ application; don't want to give the same region back when the
+ application wants two writable copies! */
+ struct _bfd_window_internal *i;
+}
+bfd_window;
+
+extern void bfd_init_window
+ (bfd_window *);
+extern void bfd_free_window
+ (bfd_window *);
+extern bfd_boolean bfd_get_file_window
+ (bfd *, file_ptr, bfd_size_type, bfd_window *, bfd_boolean);
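+
+/* Illustrative sketch (disabled) of the window routines above; ABFD,
+ OFFSET and LENGTH stand for values the caller already has. */
+#if 0
+  bfd_window w;
+
+  bfd_init_window (&w);
+  /* Map (or read) LENGTH bytes starting at file offset OFFSET; the
+     last argument asks for a writable window.  */
+  if (bfd_get_file_window (abfd, offset, length, &w, FALSE))
+    {
+      /* w.data points at the bytes and w.size gives the usable size;
+         release the window when done.  */
+      bfd_free_window (&w);
+    }
+#endif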
+
+/* XCOFF support routines for the linker. */
+
+extern bfd_boolean bfd_xcoff_link_record_set
+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_size_type);
+extern bfd_boolean bfd_xcoff_import_symbol
+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_vma,
+ const char *, const char *, const char *, unsigned int);
+extern bfd_boolean bfd_xcoff_export_symbol
+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *);
+extern bfd_boolean bfd_xcoff_link_count_reloc
+ (bfd *, struct bfd_link_info *, const char *);
+extern bfd_boolean bfd_xcoff_record_link_assignment
+ (bfd *, struct bfd_link_info *, const char *);
+extern bfd_boolean bfd_xcoff_size_dynamic_sections
+ (bfd *, struct bfd_link_info *, const char *, const char *,
+ unsigned long, unsigned long, unsigned long, bfd_boolean,
+ int, bfd_boolean, bfd_boolean, struct bfd_section **, bfd_boolean);
+extern bfd_boolean bfd_xcoff_link_generate_rtinit
+ (bfd *, const char *, const char *, bfd_boolean);
+
+/* XCOFF support routines for ar. */
+extern bfd_boolean bfd_xcoff_ar_archive_set_magic
+ (bfd *, char *);
+
+/* Externally visible COFF routines. */
+
+#if defined(__STDC__) || defined(ALMOST_STDC)
+struct internal_syment;
+union internal_auxent;
+#endif
+
+extern bfd_boolean bfd_coff_get_syment
+ (bfd *, struct bfd_symbol *, struct internal_syment *);
+
+extern bfd_boolean bfd_coff_get_auxent
+ (bfd *, struct bfd_symbol *, int, union internal_auxent *);
+
+extern bfd_boolean bfd_coff_set_symbol_class
+ (bfd *, struct bfd_symbol *, unsigned int);
+
+extern bfd_boolean bfd_m68k_coff_create_embedded_relocs
+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *, char **);
+
+/* ARM Interworking support. Called from linker. */
+extern bfd_boolean bfd_arm_allocate_interworking_sections
+ (struct bfd_link_info *);
+
+extern bfd_boolean bfd_arm_process_before_allocation
+ (bfd *, struct bfd_link_info *, int);
+
+extern bfd_boolean bfd_arm_get_bfd_for_interworking
+ (bfd *, struct bfd_link_info *);
+
+/* PE ARM Interworking support. Called from linker. */
+extern bfd_boolean bfd_arm_pe_allocate_interworking_sections
+ (struct bfd_link_info *);
+
+extern bfd_boolean bfd_arm_pe_process_before_allocation
+ (bfd *, struct bfd_link_info *, int);
+
+extern bfd_boolean bfd_arm_pe_get_bfd_for_interworking
+ (bfd *, struct bfd_link_info *);
+
+/* ELF ARM Interworking support. Called from linker. */
+extern bfd_boolean bfd_elf32_arm_allocate_interworking_sections
+ (struct bfd_link_info *);
+
+extern bfd_boolean bfd_elf32_arm_process_before_allocation
+ (bfd *, struct bfd_link_info *, int);
+
+void bfd_elf32_arm_set_target_relocs
+ (struct bfd_link_info *, int, char *, int, int);
+
+extern bfd_boolean bfd_elf32_arm_get_bfd_for_interworking
+ (bfd *, struct bfd_link_info *);
+
+extern bfd_boolean bfd_elf32_arm_add_glue_sections_to_bfd
+ (bfd *, struct bfd_link_info *);
+
+/* ELF ARM mapping symbol support */
+extern bfd_boolean bfd_is_arm_mapping_symbol_name
+ (const char * name);
+
+/* ARM Note section processing. */
+extern bfd_boolean bfd_arm_merge_machines
+ (bfd *, bfd *);
+
+extern bfd_boolean bfd_arm_update_notes
+ (bfd *, const char *);
+
+extern unsigned int bfd_arm_get_mach_from_notes
+ (bfd *, const char *);
+
+/* TI COFF load page support. */
+extern void bfd_ticoff_set_section_load_page
+ (struct bfd_section *, int);
+
+extern int bfd_ticoff_get_section_load_page
+ (struct bfd_section *);
+
+/* H8/300 functions. */
+extern bfd_vma bfd_h8300_pad_address
+ (bfd *, bfd_vma);
+
+/* IA64 Itanium code generation. Called from linker. */
+extern void bfd_elf32_ia64_after_parse
+ (int);
+
+extern void bfd_elf64_ia64_after_parse
+ (int);
+
+/* This structure is used for a comdat section, as in PE. A comdat
+ section is associated with a particular symbol. When the linker
+ sees a comdat section, it keeps only one of the sections with a
+ given name and associated with a given symbol. */
+
+struct coff_comdat_info
+{
+ /* The name of the symbol associated with a comdat section. */
+ const char *name;
+
+ /* The local symbol table index of the symbol associated with a
+ comdat section. This is only meaningful to the object file format
+ specific code; it is not an index into the list returned by
+ bfd_canonicalize_symtab. */
+ long symbol;
+};
+
+extern struct coff_comdat_info *bfd_coff_get_comdat_section
+ (bfd *, struct bfd_section *);
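+
+/* Illustrative sketch (disabled): querying the comdat information of
+ a section; ABFD and SEC stand for a bfd and one of its sections. */
+#if 0
+  struct coff_comdat_info *ci = bfd_coff_get_comdat_section (abfd, sec);
+
+  if (ci != NULL)
+    {
+      const char *sym_name = ci->name;  /* associated symbol */
+      long sym_index = ci->symbol;      /* format-specific index */
+    }
+#endif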
+
+/* Extracted from init.c. */
+void bfd_init (void);
+
+/* Extracted from opncls.c. */
+bfd *bfd_fopen (const char *filename, const char *target,
+ const char *mode, int fd);
+
+bfd *bfd_openr (const char *filename, const char *target);
+
+bfd *bfd_fdopenr (const char *filename, const char *target, int fd);
+
+bfd *bfd_openstreamr (const char *, const char *, void *);
+
+bfd *bfd_openr_iovec (const char *filename, const char *target,
+ void *(*open) (struct bfd *nbfd,
+ void *open_closure),
+ void *open_closure,
+ file_ptr (*pread) (struct bfd *nbfd,
+ void *stream,
+ void *buf,
+ file_ptr nbytes,
+ file_ptr offset),
+ int (*close) (struct bfd *nbfd,
+ void *stream));
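+
+/* Illustrative sketch (disabled) of the callback trio bfd_openr_iovec
+ expects; my_read is a hypothetical transport behind the "file". */
+#if 0
+static void *
+example_open (struct bfd *nbfd, void *open_closure)
+{
+  return open_closure;        /* becomes the STREAM passed back below */
+}
+
+static file_ptr
+example_pread (struct bfd *nbfd, void *stream, void *buf,
+               file_ptr nbytes, file_ptr offset)
+{
+  /* Read NBYTES at OFFSET into BUF; return bytes read, -1 on error.  */
+  return my_read (stream, buf, nbytes, offset);
+}
+
+static int
+example_close (struct bfd *nbfd, void *stream)
+{
+  return 0;                   /* zero on success */
+}
+#endif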
+
+bfd *bfd_openw (const char *filename, const char *target);
+
+bfd_boolean bfd_close (bfd *abfd);
+
+bfd_boolean bfd_close_all_done (bfd *);
+
+bfd *bfd_create (const char *filename, bfd *templ);
+
+bfd_boolean bfd_make_writable (bfd *abfd);
+
+bfd_boolean bfd_make_readable (bfd *abfd);
+
+unsigned long bfd_calc_gnu_debuglink_crc32
+ (unsigned long crc, const unsigned char *buf, bfd_size_type len);
+
+char *bfd_follow_gnu_debuglink (bfd *abfd, const char *dir);
+
+struct bfd_section *bfd_create_gnu_debuglink_section
+ (bfd *abfd, const char *filename);
+
+bfd_boolean bfd_fill_in_gnu_debuglink_section
+ (bfd *abfd, struct bfd_section *sect, const char *filename);
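+
+/* Illustrative sketch (disabled) of the usual open/probe/close
+ sequence; bfd_check_format and bfd_object come from the format.c
+ part of this header. */
+#if 0
+  bfd *abfd;
+
+  bfd_init ();                          /* once per program */
+  abfd = bfd_openr ("vmlinux", NULL);   /* NULL = default target */
+  if (abfd != NULL)
+    {
+      if (bfd_check_format (abfd, bfd_object))
+        {
+          /* ... examine sections and symbols ... */
+        }
+      bfd_close (abfd);
+    }
+#endif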
+
+/* Extracted from libbfd.c. */
+
+/* Byte swapping macros for user section data. */
+
+#define bfd_put_8(abfd, val, ptr) \
+ ((void) (*((unsigned char *) (ptr)) = (val) & 0xff))
+#define bfd_put_signed_8 \
+ bfd_put_8
+#define bfd_get_8(abfd, ptr) \
+ (*(unsigned char *) (ptr) & 0xff)
+#define bfd_get_signed_8(abfd, ptr) \
+ (((*(unsigned char *) (ptr) & 0xff) ^ 0x80) - 0x80)
+
+#define bfd_put_16(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_putx16, ((val),(ptr)))
+#define bfd_put_signed_16 \
+ bfd_put_16
+#define bfd_get_16(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx16, (ptr))
+#define bfd_get_signed_16(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx_signed_16, (ptr))
+
+#define bfd_put_32(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_putx32, ((val),(ptr)))
+#define bfd_put_signed_32 \
+ bfd_put_32
+#define bfd_get_32(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx32, (ptr))
+#define bfd_get_signed_32(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx_signed_32, (ptr))
+
+#define bfd_put_64(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_putx64, ((val), (ptr)))
+#define bfd_put_signed_64 \
+ bfd_put_64
+#define bfd_get_64(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx64, (ptr))
+#define bfd_get_signed_64(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx_signed_64, (ptr))
+
+#define bfd_get(bits, abfd, ptr) \
+ ((bits) == 8 ? (bfd_vma) bfd_get_8 (abfd, ptr) \
+ : (bits) == 16 ? bfd_get_16 (abfd, ptr) \
+ : (bits) == 32 ? bfd_get_32 (abfd, ptr) \
+ : (bits) == 64 ? bfd_get_64 (abfd, ptr) \
+ : (abort (), (bfd_vma) - 1))
+
+#define bfd_put(bits, abfd, val, ptr) \
+ ((bits) == 8 ? bfd_put_8 (abfd, val, ptr) \
+ : (bits) == 16 ? bfd_put_16 (abfd, val, ptr) \
+ : (bits) == 32 ? bfd_put_32 (abfd, val, ptr) \
+ : (bits) == 64 ? bfd_put_64 (abfd, val, ptr) \
+ : (abort (), (void) 0))
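+
+/* Illustrative sketch (disabled): BUF points into section data already
+ read in, and ABFD selects the byte order the macros swap to/from. */
+#if 0
+  bfd_vma v = bfd_get_32 (abfd, buf);   /* read in target order */
+  bfd_put_32 (abfd, v + 4, buf);        /* write back, adjusted */
+  v = bfd_get (32, abfd, buf);          /* size chosen at run time */
+#endif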
+
+
+/* Byte swapping macros for file header data. */
+
+#define bfd_h_put_8(abfd, val, ptr) \
+ bfd_put_8 (abfd, val, ptr)
+#define bfd_h_put_signed_8(abfd, val, ptr) \
+ bfd_put_8 (abfd, val, ptr)
+#define bfd_h_get_8(abfd, ptr) \
+ bfd_get_8 (abfd, ptr)
+#define bfd_h_get_signed_8(abfd, ptr) \
+ bfd_get_signed_8 (abfd, ptr)
+
+#define bfd_h_put_16(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_h_putx16, (val, ptr))
+#define bfd_h_put_signed_16 \
+ bfd_h_put_16
+#define bfd_h_get_16(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx16, (ptr))
+#define bfd_h_get_signed_16(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx_signed_16, (ptr))
+
+#define bfd_h_put_32(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_h_putx32, (val, ptr))
+#define bfd_h_put_signed_32 \
+ bfd_h_put_32
+#define bfd_h_get_32(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx32, (ptr))
+#define bfd_h_get_signed_32(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx_signed_32, (ptr))
+
+#define bfd_h_put_64(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_h_putx64, (val, ptr))
+#define bfd_h_put_signed_64 \
+ bfd_h_put_64
+#define bfd_h_get_64(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx64, (ptr))
+#define bfd_h_get_signed_64(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx_signed_64, (ptr))
+
+/* Aliases for the above, which should eventually go away. */
+
+#define H_PUT_64 bfd_h_put_64
+#define H_PUT_32 bfd_h_put_32
+#define H_PUT_16 bfd_h_put_16
+#define H_PUT_8 bfd_h_put_8
+#define H_PUT_S64 bfd_h_put_signed_64
+#define H_PUT_S32 bfd_h_put_signed_32
+#define H_PUT_S16 bfd_h_put_signed_16
+#define H_PUT_S8 bfd_h_put_signed_8
+#define H_GET_64 bfd_h_get_64
+#define H_GET_32 bfd_h_get_32
+#define H_GET_16 bfd_h_get_16
+#define H_GET_8 bfd_h_get_8
+#define H_GET_S64 bfd_h_get_signed_64
+#define H_GET_S32 bfd_h_get_signed_32
+#define H_GET_S16 bfd_h_get_signed_16
+#define H_GET_S8 bfd_h_get_signed_8
+
+
+/* Extracted from bfdio.c. */
+long bfd_get_mtime (bfd *abfd);
+
+long bfd_get_size (bfd *abfd);
+
+/* Extracted from bfdwin.c. */
+/* Extracted from section.c. */
+typedef struct bfd_section
+{
+ /* The name of the section; the name isn't a copy, the pointer is
+ the same as that passed to bfd_make_section. */
+ const char *name;
+
+ /* A unique sequence number. */
+ int id;
+
+ /* Which section in the bfd; 0..n-1 as sections are created in a bfd. */
+ int index;
+
+ /* The next section in the list belonging to the BFD, or NULL. */
+ struct bfd_section *next;
+
+ /* The previous section in the list belonging to the BFD, or NULL. */
+ struct bfd_section *prev;
+
+ /* The field flags contains attributes of the section. Some
+ flags are read in from the object file, and some are
+ synthesized from other information. */
+ flagword flags;
+
+#define SEC_NO_FLAGS 0x000
+
+ /* Tells the OS to allocate space for this section when loading.
+ This is clear for a section containing debug information only. */
+#define SEC_ALLOC 0x001
+
+ /* Tells the OS to load the section from the file when loading.
+ This is clear for a .bss section. */
+#define SEC_LOAD 0x002
+
+ /* The section contains data still to be relocated, so there is
+ some relocation information too. */
+#define SEC_RELOC 0x004
+
+ /* A signal to the OS that the section contains read only data. */
+#define SEC_READONLY 0x008
+
+ /* The section contains code only. */
+#define SEC_CODE 0x010
+
+ /* The section contains data only. */
+#define SEC_DATA 0x020
+
+ /* The section will reside in ROM. */
+#define SEC_ROM 0x040
+
+ /* The section contains constructor information. This section
+ type is used by the linker to create lists of constructors and
+ destructors used by <<g++>>. When a back end sees a symbol
+ which should be used in a constructor list, it creates a new
+ section for the type of name (e.g., <<__CTOR_LIST__>>), attaches
+ the symbol to it, and builds a relocation. To build the lists
+ of constructors, all the linker has to do is catenate all the
+ sections called <<__CTOR_LIST__>> and relocate the data
+ contained within - exactly the operations it would perform on
+ standard data. */
+#define SEC_CONSTRUCTOR 0x080
+
+ /* The section has contents - a data section could be
+ <<SEC_ALLOC>> | <<SEC_HAS_CONTENTS>>; a debug section could be
+ <<SEC_HAS_CONTENTS>> */
+#define SEC_HAS_CONTENTS 0x100
+
+ /* An instruction to the linker to not output the section
+ even if it has information which would normally be written. */
+#define SEC_NEVER_LOAD 0x200
+
+ /* The section contains thread local data. */
+#define SEC_THREAD_LOCAL 0x400
+
+ /* The section has GOT references. This flag is only for the
+ linker, and is currently only used by the elf32-hppa back end.
+ It will be set if global offset table references were detected
+ in this section, which indicate to the linker that the section
+ contains PIC code, and must be handled specially when doing a
+ static link. */
+#define SEC_HAS_GOT_REF 0x800
+
+ /* The section contains common symbols (symbols may be defined
+ multiple times, the value of a symbol is the amount of
+ space it requires, and the largest symbol value is the one
+ used). Most targets have exactly one of these (which we
+ translate to bfd_com_section_ptr), but ECOFF has two. */
+#define SEC_IS_COMMON 0x1000
+
+ /* The section contains only debugging information. For
+ example, this is set for ELF .debug and .stab sections.
+ strip tests this flag to see if a section can be
+ discarded. */
+#define SEC_DEBUGGING 0x2000
+
+ /* The contents of this section are held in memory pointed to
+ by the contents field. This is checked by bfd_get_section_contents,
+ and the data is retrieved from memory if appropriate. */
+#define SEC_IN_MEMORY 0x4000
+
+ /* The contents of this section are to be excluded by the
+ linker for executable and shared objects unless those
+ objects are to be further relocated. */
+#define SEC_EXCLUDE 0x8000
+
+ /* The contents of this section are to be sorted based on the sum of
+ the symbol and addend values specified by the associated relocation
+ entries. Entries without associated relocation entries will be
+ appended to the end of the section in an unspecified order. */
+#define SEC_SORT_ENTRIES 0x10000
+
+ /* When linking, duplicate sections of the same name should be
+ discarded, rather than being combined into a single section as
+ is usually done. This is similar to how common symbols are
+ handled. See SEC_LINK_DUPLICATES below. */
+#define SEC_LINK_ONCE 0x20000
+
+ /* If SEC_LINK_ONCE is set, this bitfield describes how the linker
+ should handle duplicate sections. */
+#define SEC_LINK_DUPLICATES 0x40000
+
+ /* This value for SEC_LINK_DUPLICATES means that duplicate
+ sections with the same name should simply be discarded. */
+#define SEC_LINK_DUPLICATES_DISCARD 0x0
+
+ /* This value for SEC_LINK_DUPLICATES means that the linker
+ should warn if there are any duplicate sections, although
+ it should still only link one copy. */
+#define SEC_LINK_DUPLICATES_ONE_ONLY 0x80000
+
+ /* This value for SEC_LINK_DUPLICATES means that the linker
+ should warn if any duplicate sections are a different size. */
+#define SEC_LINK_DUPLICATES_SAME_SIZE 0x100000
+
+ /* This value for SEC_LINK_DUPLICATES means that the linker
+ should warn if any duplicate sections contain different
+ contents. */
+#define SEC_LINK_DUPLICATES_SAME_CONTENTS \
+ (SEC_LINK_DUPLICATES_ONE_ONLY | SEC_LINK_DUPLICATES_SAME_SIZE)
+
+ /* This section was created by the linker as part of dynamic
+ relocation or other arcane processing. It is skipped when
+ going through the first-pass output, trusting that someone
+ else up the line will take care of it later. */
+#define SEC_LINKER_CREATED 0x200000
+
+ /* This section should not be subject to garbage collection. */
+#define SEC_KEEP 0x400000
+
+ /* This section contains "short" data, and should be placed
+ "near" the GP. */
+#define SEC_SMALL_DATA 0x800000
+
+ /* Attempt to merge identical entities in the section.
+ Entity size is given in the entsize field. */
+#define SEC_MERGE 0x1000000
+
+ /* If given with SEC_MERGE, entities to merge are zero terminated
+ strings where entsize specifies character size instead of fixed
+ size entries. */
+#define SEC_STRINGS 0x2000000
+
+ /* This section contains data about section groups. */
+#define SEC_GROUP 0x4000000
+
+ /* The section is a COFF shared library section. This flag is
+ only for the linker. If this type of section appears in
+ the input file, the linker must copy it to the output file
+ without changing the vma or size. FIXME: Although this
+ was originally intended to be general, it really is COFF
+ specific (and the flag was renamed to indicate this). It
+ might be cleaner to have some more general mechanism to
+ allow the back end to control what the linker does with
+ sections. */
+#define SEC_COFF_SHARED_LIBRARY 0x10000000
+
+ /* This section contains data which may be shared with other
+ executables or shared objects. This is for COFF only. */
+#define SEC_COFF_SHARED 0x20000000
+
+ /* When a section with this flag is being linked, then if the size of
+ the input section is less than a page, it should not cross a page
+ boundary. If the size of the input section is one page or more,
+ it should be aligned on a page boundary. This is for TI
+ TMS320C54X only. */
+#define SEC_TIC54X_BLOCK 0x40000000
+
+ /* Conditionally link this section; do not link if there are no
+ references found to any symbol in the section. This is for TI
+ TMS320C54X only. */
+#define SEC_TIC54X_CLINK 0x80000000
+
+ /* End of section flags. */
+
+ /* Some internal packed boolean fields. */
+
+ /* See the vma field. */
+ unsigned int user_set_vma : 1;
+
+ /* A mark flag used by some of the linker backends. */
+ unsigned int linker_mark : 1;
+
+ /* Another mark flag used by some of the linker backends. Set for
+ output sections that have an input section. */
+ unsigned int linker_has_input : 1;
+
+ /* Mark flags used by some linker backends for garbage collection. */
+ unsigned int gc_mark : 1;
+ unsigned int gc_mark_from_eh : 1;
+
+ /* The following flags are used by the ELF linker. */
+
+ /* Mark sections which have been allocated to segments. */
+ unsigned int segment_mark : 1;
+
+ /* Type of sec_info information. */
+ unsigned int sec_info_type:3;
+#define ELF_INFO_TYPE_NONE 0
+#define ELF_INFO_TYPE_STABS 1
+#define ELF_INFO_TYPE_MERGE 2
+#define ELF_INFO_TYPE_EH_FRAME 3
+#define ELF_INFO_TYPE_JUST_SYMS 4
+
+ /* Nonzero if this section uses RELA relocations, rather than REL. */
+ unsigned int use_rela_p:1;
+
+ /* Bits used by various backends. The generic code doesn't touch
+ these fields. */
+
+ /* Nonzero if this section has TLS related relocations. */
+ unsigned int has_tls_reloc:1;
+
+ /* Nonzero if this section has a gp reloc. */
+ unsigned int has_gp_reloc:1;
+
+ /* Nonzero if this section needs the relax finalize pass. */
+ unsigned int need_finalize_relax:1;
+
+ /* Whether relocations have been processed. */
+ unsigned int reloc_done : 1;
+
+ /* End of internal packed boolean fields. */
+
+ /* The virtual memory address of the section - where it will be
+ at run time. The symbols are relocated against this. The
+ user_set_vma flag is maintained by bfd; if it's not set, the
+ backend can assign addresses (for example, in <<a.out>>, where
+ the default address for <<.data>> is dependent on the specific
+ target and various flags). */
+ bfd_vma vma;
+
+ /* The load address of the section - where it would be in a
+ rom image; really only used for writing section header
+ information. */
+ bfd_vma lma;
+
+ /* The size of the section in octets, as it will be output.
+ Contains a value even if the section has no contents (e.g., the
+ size of <<.bss>>). */
+ bfd_size_type size;
+
+ /* For input sections, the original size on disk of the section, in
+ octets. This field is used by the linker relaxation code. It is
+ currently only set for sections where the linker relaxation scheme
+ doesn't cache altered section and reloc contents (stabs, eh_frame,
+ SEC_MERGE, some coff relaxing targets), and thus the original size
+ needs to be kept to read the section multiple times.
+ For output sections, rawsize holds the section size calculated on
+ a previous linker relaxation pass. */
+ bfd_size_type rawsize;
+
+ /* If this section is going to be output, then this value is the
+ offset in *bytes* into the output section of the first byte in the
+ input section (byte ==> smallest addressable unit on the
+ target). In most cases, if this was going to start at the
+ 100th octet (8-bit quantity) in the output section, this value
+ would be 100. However, if the target byte size is 16 bits
+ (bfd_octets_per_byte is "2"), this value would be 50. */
+ bfd_vma output_offset;
+
+ /* The output section through which to map on output. */
+ struct bfd_section *output_section;
+
+ /* The alignment requirement of the section, as an exponent of 2 -
+ e.g., 3 aligns to 2^3 (or 8). */
+ unsigned int alignment_power;
+
+ /* If an input section, a pointer to a vector of relocation
+ records for the data in this section. */
+ struct reloc_cache_entry *relocation;
+
+ /* If an output section, a pointer to a vector of pointers to
+ relocation records for the data in this section. */
+ struct reloc_cache_entry **orelocation;
+
+ /* The number of relocation records in one of the above. */
+ unsigned reloc_count;
+
+ /* Information below is back end specific - and not always used
+ or updated. */
+
+ /* File position of section data. */
+ file_ptr filepos;
+
+ /* File position of relocation info. */
+ file_ptr rel_filepos;
+
+ /* File position of line data. */
+ file_ptr line_filepos;
+
+ /* Pointer to data for applications. */
+ void *userdata;
+
+ /* If the SEC_IN_MEMORY flag is set, this points to the actual
+ contents. */
+ unsigned char *contents;
+
+ /* Attached line number information. */
+ alent *lineno;
+
+ /* Number of line number records. */
+ unsigned int lineno_count;
+
+ /* Entity size for merging purposes. */
+ unsigned int entsize;
+
+ /* Points to the kept section if this section is a link-once section,
+ and is discarded. */
+ struct bfd_section *kept_section;
+
+ /* When a section is being output, this value changes as more
+ linenumbers are written out. */
+ file_ptr moving_line_filepos;
+
+ /* What the section number is in the target world. */
+ int target_index;
+
+ void *used_by_bfd;
+
+ /* If this is a constructor section then here is a list of the
+ relocations created to relocate items within it. */
+ struct relent_chain *constructor_chain;
+
+ /* The BFD which owns the section. */
+ bfd *owner;
+
+ /* A symbol which points at this section only. */
+ struct bfd_symbol *symbol;
+ struct bfd_symbol **symbol_ptr_ptr;
+
+ /* Early in the link process, map_head and map_tail are used to build
+ a list of input sections attached to an output section. Later,
+ output sections use these fields for a list of bfd_link_order
+ structs. */
+ union {
+ struct bfd_link_order *link_order;
+ struct bfd_section *s;
+ } map_head, map_tail;
+} asection;
+
+/* These sections are global, and are managed by BFD. The application
+ and target back end are not permitted to change the values in
+ these sections. New code should use the section_ptr macros rather
+ than referring directly to the const sections. The const sections
+ may eventually vanish. */
+#define BFD_ABS_SECTION_NAME "*ABS*"
+#define BFD_UND_SECTION_NAME "*UND*"
+#define BFD_COM_SECTION_NAME "*COM*"
+#define BFD_IND_SECTION_NAME "*IND*"
+
+/* The absolute section. */
+extern asection bfd_abs_section;
+#define bfd_abs_section_ptr ((asection *) &bfd_abs_section)
+#define bfd_is_abs_section(sec) ((sec) == bfd_abs_section_ptr)
+/* Pointer to the undefined section. */
+extern asection bfd_und_section;
+#define bfd_und_section_ptr ((asection *) &bfd_und_section)
+#define bfd_is_und_section(sec) ((sec) == bfd_und_section_ptr)
+/* Pointer to the common section. */
+extern asection bfd_com_section;
+#define bfd_com_section_ptr ((asection *) &bfd_com_section)
+/* Pointer to the indirect section. */
+extern asection bfd_ind_section;
+#define bfd_ind_section_ptr ((asection *) &bfd_ind_section)
+#define bfd_is_ind_section(sec) ((sec) == bfd_ind_section_ptr)
+
+#define bfd_is_const_section(SEC) \
+ ( ((SEC) == bfd_abs_section_ptr) \
+ || ((SEC) == bfd_und_section_ptr) \
+ || ((SEC) == bfd_com_section_ptr) \
+ || ((SEC) == bfd_ind_section_ptr))
+
+extern const struct bfd_symbol * const bfd_abs_symbol;
+extern const struct bfd_symbol * const bfd_com_symbol;
+extern const struct bfd_symbol * const bfd_und_symbol;
+extern const struct bfd_symbol * const bfd_ind_symbol;
+
+/* Macros to handle insertion and deletion of a bfd's sections. These
+ only handle the list pointers, i.e. do not adjust section_count,
+ target_index etc. */
+#define bfd_section_list_remove(ABFD, S) \
+ do \
+ { \
+ asection *_s = S; \
+ asection *_next = _s->next; \
+ asection *_prev = _s->prev; \
+ if (_prev) \
+ _prev->next = _next; \
+ else \
+ (ABFD)->sections = _next; \
+ if (_next) \
+ _next->prev = _prev; \
+ else \
+ (ABFD)->section_last = _prev; \
+ } \
+ while (0)
+#define bfd_section_list_append(ABFD, S) \
+ do \
+ { \
+ asection *_s = S; \
+ bfd *_abfd = ABFD; \
+ _s->next = NULL; \
+ if (_abfd->section_last) \
+ { \
+ _s->prev = _abfd->section_last; \
+ _abfd->section_last->next = _s; \
+ } \
+ else \
+ { \
+ _s->prev = NULL; \
+ _abfd->sections = _s; \
+ } \
+ _abfd->section_last = _s; \
+ } \
+ while (0)
+#define bfd_section_list_prepend(ABFD, S) \
+ do \
+ { \
+ asection *_s = S; \
+ bfd *_abfd = ABFD; \
+ _s->prev = NULL; \
+ if (_abfd->sections) \
+ { \
+ _s->next = _abfd->sections; \
+ _abfd->sections->prev = _s; \
+ } \
+ else \
+ { \
+ _s->next = NULL; \
+ _abfd->section_last = _s; \
+ } \
+ _abfd->sections = _s; \
+ } \
+ while (0)
+#define bfd_section_list_insert_after(ABFD, A, S) \
+ do \
+ { \
+ asection *_a = A; \
+ asection *_s = S; \
+ asection *_next = _a->next; \
+ _s->next = _next; \
+ _s->prev = _a; \
+ _a->next = _s; \
+ if (_next) \
+ _next->prev = _s; \
+ else \
+ (ABFD)->section_last = _s; \
+ } \
+ while (0)
+#define bfd_section_list_insert_before(ABFD, B, S) \
+ do \
+ { \
+ asection *_b = B; \
+ asection *_s = S; \
+ asection *_prev = _b->prev; \
+ _s->prev = _prev; \
+ _s->next = _b; \
+ _b->prev = _s; \
+ if (_prev) \
+ _prev->next = _s; \
+ else \
+ (ABFD)->sections = _s; \
+ } \
+ while (0)
+#define bfd_section_removed_from_list(ABFD, S) \
+ ((S)->next == NULL ? (ABFD)->section_last != (S) : (S)->next->prev != (S))
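+
+/* Illustrative sketch (disabled): moving a section to the end of its
+ bfd's list with the macros above. As noted, only the list pointers
+ are touched; counts and indices are left to the caller. */
+#if 0
+  bfd_section_list_remove (abfd, sec);
+  bfd_section_list_append (abfd, sec);
+  /* bfd_section_removed_from_list (abfd, sec) is FALSE again here.  */
+#endif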
+
+void bfd_section_list_clear (bfd *);
+
+asection *bfd_get_section_by_name (bfd *abfd, const char *name);
+
+asection *bfd_get_section_by_name_if
+ (bfd *abfd,
+ const char *name,
+ bfd_boolean (*func) (bfd *abfd, asection *sect, void *obj),
+ void *obj);
+
+char *bfd_get_unique_section_name
+ (bfd *abfd, const char *templat, int *count);
+
+asection *bfd_make_section_old_way (bfd *abfd, const char *name);
+
+asection *bfd_make_section_anyway_with_flags
+ (bfd *abfd, const char *name, flagword flags);
+
+asection *bfd_make_section_anyway (bfd *abfd, const char *name);
+
+asection *bfd_make_section_with_flags
+ (bfd *, const char *name, flagword flags);
+
+asection *bfd_make_section (bfd *, const char *name);
+
+bfd_boolean bfd_set_section_flags
+ (bfd *abfd, asection *sec, flagword flags);
+
+void bfd_map_over_sections
+ (bfd *abfd,
+ void (*func) (bfd *abfd, asection *sect, void *obj),
+ void *obj);
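+
+/* Illustrative sketch (disabled) of a bfd_map_over_sections callback;
+ summing output sizes stands in for any per-section work. */
+#if 0
+static void
+example_per_section (bfd *abfd, asection *sect, void *obj)
+{
+  bfd_size_type *total = obj;
+
+  *total += sect->size;
+}
+
+/* bfd_size_type total = 0;
+   bfd_map_over_sections (abfd, example_per_section, &total);  */
+#endif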
+
+asection *bfd_sections_find_if
+ (bfd *abfd,
+ bfd_boolean (*operation) (bfd *abfd, asection *sect, void *obj),
+ void *obj);
+
+bfd_boolean bfd_set_section_size
+ (bfd *abfd, asection *sec, bfd_size_type val);
+
+bfd_boolean bfd_set_section_contents
+ (bfd *abfd, asection *section, const void *data,
+ file_ptr offset, bfd_size_type count);
+
+bfd_boolean bfd_get_section_contents
+ (bfd *abfd, asection *section, void *location, file_ptr offset,
+ bfd_size_type count);
+
+bfd_boolean bfd_malloc_and_get_section
+ (bfd *abfd, asection *section, bfd_byte **buf);
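+
+/* Illustrative sketch (disabled): fetching a whole section into a
+ buffer that bfd mallocs; freeing it (with free from <stdlib.h>) is
+ left to the caller. */
+#if 0
+  bfd_byte *buf = NULL;
+
+  if (bfd_malloc_and_get_section (abfd, sec, &buf))
+    {
+      /* sec->size bytes of contents are now at buf.  */
+    }
+  free (buf);
+#endif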
+
+bfd_boolean bfd_copy_private_section_data
+ (bfd *ibfd, asection *isec, bfd *obfd, asection *osec);
+
+#define bfd_copy_private_section_data(ibfd, isection, obfd, osection) \
+ BFD_SEND (obfd, _bfd_copy_private_section_data, \
+ (ibfd, isection, obfd, osection))
+bfd_boolean bfd_generic_is_group_section (bfd *, const asection *sec);
+
+bfd_boolean bfd_generic_discard_group (bfd *abfd, asection *group);
+
+/* Extracted from archures.c. */
+enum bfd_architecture
+{
+ bfd_arch_unknown, /* File arch not known. */
+ bfd_arch_obscure, /* Arch known, not one of these. */
+ bfd_arch_m68k, /* Motorola 68xxx */
+#define bfd_mach_m68000 1
+#define bfd_mach_m68008 2
+#define bfd_mach_m68010 3
+#define bfd_mach_m68020 4
+#define bfd_mach_m68030 5
+#define bfd_mach_m68040 6
+#define bfd_mach_m68060 7
+#define bfd_mach_cpu32 8
+#define bfd_mach_mcf5200 9
+#define bfd_mach_mcf5206e 10
+#define bfd_mach_mcf5307 11
+#define bfd_mach_mcf5407 12
+#define bfd_mach_mcf528x 13
+#define bfd_mach_mcfv4e 14
+#define bfd_mach_mcf521x 15
+#define bfd_mach_mcf5249 16
+#define bfd_mach_mcf547x 17
+#define bfd_mach_mcf548x 18
+ bfd_arch_vax, /* DEC Vax */
+ bfd_arch_i960, /* Intel 960 */
+ /* The order of the following is important.
+ A lower number indicates a machine type that
+ only accepts a subset of the instructions
+ available to machines with higher numbers.
+ The exception is the "ca", which is
+ incompatible with all other machines except
+ "core". */
+
+#define bfd_mach_i960_core 1
+#define bfd_mach_i960_ka_sa 2
+#define bfd_mach_i960_kb_sb 3
+#define bfd_mach_i960_mc 4
+#define bfd_mach_i960_xa 5
+#define bfd_mach_i960_ca 6
+#define bfd_mach_i960_jx 7
+#define bfd_mach_i960_hx 8
+
+ bfd_arch_or32, /* OpenRISC 32 */
+
+ bfd_arch_a29k, /* AMD 29000 */
+ bfd_arch_sparc, /* SPARC */
+#define bfd_mach_sparc 1
+/* The difference between v8plus and v9 is that v9 is a true 64 bit env. */
+#define bfd_mach_sparc_sparclet 2
+#define bfd_mach_sparc_sparclite 3
+#define bfd_mach_sparc_v8plus 4
+#define bfd_mach_sparc_v8plusa 5 /* with ultrasparc add'ns. */
+#define bfd_mach_sparc_sparclite_le 6
+#define bfd_mach_sparc_v9 7
+#define bfd_mach_sparc_v9a 8 /* with ultrasparc add'ns. */
+#define bfd_mach_sparc_v8plusb 9 /* with cheetah add'ns. */
+#define bfd_mach_sparc_v9b 10 /* with cheetah add'ns. */
+/* Nonzero if MACH has the v9 instruction set. */
+#define bfd_mach_sparc_v9_p(mach) \
+ ((mach) >= bfd_mach_sparc_v8plus && (mach) <= bfd_mach_sparc_v9b \
+ && (mach) != bfd_mach_sparc_sparclite_le)
+/* Nonzero if MACH is a 64 bit sparc architecture. */
+#define bfd_mach_sparc_64bit_p(mach) \
+ ((mach) >= bfd_mach_sparc_v9 && (mach) != bfd_mach_sparc_v8plusb)
+ bfd_arch_mips, /* MIPS Rxxxx */
+#define bfd_mach_mips3000 3000
+#define bfd_mach_mips3900 3900
+#define bfd_mach_mips4000 4000
+#define bfd_mach_mips4010 4010
+#define bfd_mach_mips4100 4100
+#define bfd_mach_mips4111 4111
+#define bfd_mach_mips4120 4120
+#define bfd_mach_mips4300 4300
+#define bfd_mach_mips4400 4400
+#define bfd_mach_mips4600 4600
+#define bfd_mach_mips4650 4650
+#define bfd_mach_mips5000 5000
+#define bfd_mach_mips5400 5400
+#define bfd_mach_mips5500 5500
+#define bfd_mach_mips6000 6000
+#define bfd_mach_mips7000 7000
+#define bfd_mach_mips8000 8000
+#define bfd_mach_mips9000 9000
+#define bfd_mach_mips10000 10000
+#define bfd_mach_mips12000 12000
+#define bfd_mach_mips16 16
+#define bfd_mach_mips5 5
+#define bfd_mach_mips_sb1 12310201 /* octal 'SB', 01 */
+#define bfd_mach_mipsisa32 32
+#define bfd_mach_mipsisa32r2 33
+#define bfd_mach_mipsisa64 64
+#define bfd_mach_mipsisa64r2 65
+ bfd_arch_i386, /* Intel 386 */
+#define bfd_mach_i386_i386 1
+#define bfd_mach_i386_i8086 2
+#define bfd_mach_i386_i386_intel_syntax 3
+#define bfd_mach_x86_64 64
+#define bfd_mach_x86_64_intel_syntax 65
+ bfd_arch_we32k, /* AT&T WE32xxx */
+ bfd_arch_tahoe, /* CCI/Harris Tahoe */
+ bfd_arch_i860, /* Intel 860 */
+ bfd_arch_i370, /* IBM 360/370 Mainframes */
+ bfd_arch_romp, /* IBM ROMP PC/RT */
+ bfd_arch_alliant, /* Alliant */
+ bfd_arch_convex, /* Convex */
+ bfd_arch_m88k, /* Motorola 88xxx */
+ bfd_arch_m98k, /* Motorola 98xxx */
+ bfd_arch_pyramid, /* Pyramid Technology */
+ bfd_arch_h8300, /* Renesas H8/300 (formerly Hitachi H8/300) */
+#define bfd_mach_h8300 1
+#define bfd_mach_h8300h 2
+#define bfd_mach_h8300s 3
+#define bfd_mach_h8300hn 4
+#define bfd_mach_h8300sn 5
+#define bfd_mach_h8300sx 6
+#define bfd_mach_h8300sxn 7
+ bfd_arch_pdp11, /* DEC PDP-11 */
+ bfd_arch_powerpc, /* PowerPC */
+#define bfd_mach_ppc 32
+#define bfd_mach_ppc64 64
+#define bfd_mach_ppc_403 403
+#define bfd_mach_ppc_403gc 4030
+#define bfd_mach_ppc_505 505
+#define bfd_mach_ppc_601 601
+#define bfd_mach_ppc_602 602
+#define bfd_mach_ppc_603 603
+#define bfd_mach_ppc_ec603e 6031
+#define bfd_mach_ppc_604 604
+#define bfd_mach_ppc_620 620
+#define bfd_mach_ppc_630 630
+#define bfd_mach_ppc_750 750
+#define bfd_mach_ppc_860 860
+#define bfd_mach_ppc_a35 35
+#define bfd_mach_ppc_rs64ii 642
+#define bfd_mach_ppc_rs64iii 643
+#define bfd_mach_ppc_7400 7400
+#define bfd_mach_ppc_e500 500
+ bfd_arch_rs6000, /* IBM RS/6000 */
+#define bfd_mach_rs6k 6000
+#define bfd_mach_rs6k_rs1 6001
+#define bfd_mach_rs6k_rsc 6003
+#define bfd_mach_rs6k_rs2 6002
+ bfd_arch_hppa, /* HP PA RISC */
+#define bfd_mach_hppa10 10
+#define bfd_mach_hppa11 11
+#define bfd_mach_hppa20 20
+#define bfd_mach_hppa20w 25
+ bfd_arch_d10v, /* Mitsubishi D10V */
+#define bfd_mach_d10v 1
+#define bfd_mach_d10v_ts2 2
+#define bfd_mach_d10v_ts3 3
+ bfd_arch_d30v, /* Mitsubishi D30V */
+ bfd_arch_dlx, /* DLX */
+ bfd_arch_m68hc11, /* Motorola 68HC11 */
+ bfd_arch_m68hc12, /* Motorola 68HC12 */
+#define bfd_mach_m6812_default 0
+#define bfd_mach_m6812 1
+#define bfd_mach_m6812s 2
+ bfd_arch_z8k, /* Zilog Z8000 */
+#define bfd_mach_z8001 1
+#define bfd_mach_z8002 2
+ bfd_arch_h8500, /* Renesas H8/500 (formerly Hitachi H8/500) */
+ bfd_arch_sh, /* Renesas / SuperH SH (formerly Hitachi SH) */
+#define bfd_mach_sh 1
+#define bfd_mach_sh2 0x20
+#define bfd_mach_sh_dsp 0x2d
+#define bfd_mach_sh2a 0x2a
+#define bfd_mach_sh2a_nofpu 0x2b
+#define bfd_mach_sh2a_nofpu_or_sh4_nommu_nofpu 0x2a1
+#define bfd_mach_sh2a_nofpu_or_sh3_nommu 0x2a2
+#define bfd_mach_sh2a_or_sh4 0x2a3
+#define bfd_mach_sh2a_or_sh3e 0x2a4
+#define bfd_mach_sh2e 0x2e
+#define bfd_mach_sh3 0x30
+#define bfd_mach_sh3_nommu 0x31
+#define bfd_mach_sh3_dsp 0x3d
+#define bfd_mach_sh3e 0x3e
+#define bfd_mach_sh4 0x40
+#define bfd_mach_sh4_nofpu 0x41
+#define bfd_mach_sh4_nommu_nofpu 0x42
+#define bfd_mach_sh4a 0x4a
+#define bfd_mach_sh4a_nofpu 0x4b
+#define bfd_mach_sh4al_dsp 0x4d
+#define bfd_mach_sh5 0x50
+ bfd_arch_alpha, /* Dec Alpha */
+#define bfd_mach_alpha_ev4 0x10
+#define bfd_mach_alpha_ev5 0x20
+#define bfd_mach_alpha_ev6 0x30
+ bfd_arch_arm, /* Advanced Risc Machines ARM. */
+#define bfd_mach_arm_unknown 0
+#define bfd_mach_arm_2 1
+#define bfd_mach_arm_2a 2
+#define bfd_mach_arm_3 3
+#define bfd_mach_arm_3M 4
+#define bfd_mach_arm_4 5
+#define bfd_mach_arm_4T 6
+#define bfd_mach_arm_5 7
+#define bfd_mach_arm_5T 8
+#define bfd_mach_arm_5TE 9
+#define bfd_mach_arm_XScale 10
+#define bfd_mach_arm_ep9312 11
+#define bfd_mach_arm_iWMMXt 12
+ bfd_arch_ns32k, /* National Semiconductors ns32000 */
+ bfd_arch_w65, /* WDC 65816 */
+ bfd_arch_tic30, /* Texas Instruments TMS320C30 */
+ bfd_arch_tic4x, /* Texas Instruments TMS320C3X/4X */
+#define bfd_mach_tic3x 30
+#define bfd_mach_tic4x 40
+ bfd_arch_tic54x, /* Texas Instruments TMS320C54X */
+ bfd_arch_tic80, /* TI TMS320c80 (MVP) */
+ bfd_arch_v850, /* NEC V850 */
+#define bfd_mach_v850 1
+#define bfd_mach_v850e 'E'
+#define bfd_mach_v850e1 '1'
+ bfd_arch_arc, /* ARC Cores */
+#define bfd_mach_arc_5 5
+#define bfd_mach_arc_6 6
+#define bfd_mach_arc_7 7
+#define bfd_mach_arc_8 8
+ bfd_arch_m32c, /* Renesas M16C/M32C. */
+#define bfd_mach_m16c 0x75
+#define bfd_mach_m32c 0x78
+ bfd_arch_m32r, /* Renesas M32R (formerly Mitsubishi M32R/D) */
+#define bfd_mach_m32r 1 /* For backwards compatibility. */
+#define bfd_mach_m32rx 'x'
+#define bfd_mach_m32r2 '2'
+ bfd_arch_mn10200, /* Matsushita MN10200 */
+ bfd_arch_mn10300, /* Matsushita MN10300 */
+#define bfd_mach_mn10300 300
+#define bfd_mach_am33 330
+#define bfd_mach_am33_2 332
+ bfd_arch_fr30,
+#define bfd_mach_fr30 0x46523330
+ bfd_arch_frv,
+#define bfd_mach_frv 1
+#define bfd_mach_frvsimple 2
+#define bfd_mach_fr300 300
+#define bfd_mach_fr400 400
+#define bfd_mach_fr450 450
+#define bfd_mach_frvtomcat 499 /* fr500 prototype */
+#define bfd_mach_fr500 500
+#define bfd_mach_fr550 550
+ bfd_arch_mcore,
+ bfd_arch_ia64, /* HP/Intel ia64 */
+#define bfd_mach_ia64_elf64 64
+#define bfd_mach_ia64_elf32 32
+ bfd_arch_ip2k, /* Ubicom IP2K microcontrollers. */
+#define bfd_mach_ip2022 1
+#define bfd_mach_ip2022ext 2
+ bfd_arch_iq2000, /* Vitesse IQ2000. */
+#define bfd_mach_iq2000 1
+#define bfd_mach_iq10 2
+ bfd_arch_ms1,
+#define bfd_mach_ms1 1
+#define bfd_mach_mrisc2 2
+ bfd_arch_pj,
+ bfd_arch_avr, /* Atmel AVR microcontrollers. */
+#define bfd_mach_avr1 1
+#define bfd_mach_avr2 2
+#define bfd_mach_avr3 3
+#define bfd_mach_avr4 4
+#define bfd_mach_avr5 5
+ bfd_arch_cr16c, /* National Semiconductor CompactRISC. */
+#define bfd_mach_cr16c 1
+ bfd_arch_crx, /* National Semiconductor CRX. */
+#define bfd_mach_crx 1
+ bfd_arch_cris, /* Axis CRIS */
+#define bfd_mach_cris_v0_v10 255
+#define bfd_mach_cris_v32 32
+#define bfd_mach_cris_v10_v32 1032
+ bfd_arch_s390, /* IBM s390 */
+#define bfd_mach_s390_31 31
+#define bfd_mach_s390_64 64
+ bfd_arch_openrisc, /* OpenRISC */
+ bfd_arch_mmix, /* Donald Knuth's educational processor. */
+ bfd_arch_xstormy16,
+#define bfd_mach_xstormy16 1
+ bfd_arch_msp430, /* Texas Instruments MSP430 architecture. */
+#define bfd_mach_msp11 11
+#define bfd_mach_msp110 110
+#define bfd_mach_msp12 12
+#define bfd_mach_msp13 13
+#define bfd_mach_msp14 14
+#define bfd_mach_msp15 15
+#define bfd_mach_msp16 16
+#define bfd_mach_msp31 31
+#define bfd_mach_msp32 32
+#define bfd_mach_msp33 33
+#define bfd_mach_msp41 41
+#define bfd_mach_msp42 42
+#define bfd_mach_msp43 43
+#define bfd_mach_msp44 44
+ bfd_arch_xtensa, /* Tensilica's Xtensa cores. */
+#define bfd_mach_xtensa 1
+ bfd_arch_maxq, /* Dallas MAXQ 10/20 */
+#define bfd_mach_maxq10 10
+#define bfd_mach_maxq20 20
+ bfd_arch_last
+ };
+
+typedef struct bfd_arch_info
+{
+ int bits_per_word;
+ int bits_per_address;
+ int bits_per_byte;
+ enum bfd_architecture arch;
+ unsigned long mach;
+ const char *arch_name;
+ const char *printable_name;
+ unsigned int section_align_power;
+ /* TRUE if this is the default machine for the architecture.
+ The default arch should be the first entry for an arch so that
+ all the entries for that arch can be accessed via <<next>>. */
+ bfd_boolean the_default;
+ const struct bfd_arch_info * (*compatible)
+ (const struct bfd_arch_info *a, const struct bfd_arch_info *b);
+
+ bfd_boolean (*scan) (const struct bfd_arch_info *, const char *);
+
+ const struct bfd_arch_info *next;
+}
+bfd_arch_info_type;
+
+const char *bfd_printable_name (bfd *abfd);
+
+const bfd_arch_info_type *bfd_scan_arch (const char *string);
+
+const char **bfd_arch_list (void);
+
+const bfd_arch_info_type *bfd_arch_get_compatible
+ (const bfd *abfd, const bfd *bbfd, bfd_boolean accept_unknowns);
+
+void bfd_set_arch_info (bfd *abfd, const bfd_arch_info_type *arg);
+
+enum bfd_architecture bfd_get_arch (bfd *abfd);
+
+unsigned long bfd_get_mach (bfd *abfd);
+
+unsigned int bfd_arch_bits_per_byte (bfd *abfd);
+
+unsigned int bfd_arch_bits_per_address (bfd *abfd);
+
+const bfd_arch_info_type *bfd_get_arch_info (bfd *abfd);
+
+const bfd_arch_info_type *bfd_lookup_arch
+ (enum bfd_architecture arch, unsigned long machine);
+
+const char *bfd_printable_arch_mach
+ (enum bfd_architecture arch, unsigned long machine);
+
+unsigned int bfd_octets_per_byte (bfd *abfd);
+
+unsigned int bfd_arch_mach_octets_per_byte
+ (enum bfd_architecture arch, unsigned long machine);
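+
+/* Illustrative sketch (disabled): mapping between an arch/mach pair
+ and its printable name with the routines above. */
+#if 0
+  const bfd_arch_info_type *info
+    = bfd_lookup_arch (bfd_arch_i386, bfd_mach_x86_64);
+
+  if (info != NULL)
+    {
+      const char *name = info->printable_name;  /* e.g. "i386:x86-64" */
+    }
+
+  /* Or the other direction, from a name: */
+  info = bfd_scan_arch ("i386");
+#endif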
+
+/* Extracted from reloc.c. */
+typedef enum bfd_reloc_status
+{
+ /* No errors detected. */
+ bfd_reloc_ok,
+
+ /* The relocation was performed, but there was an overflow. */
+ bfd_reloc_overflow,
+
+ /* The address to relocate was not within the section supplied. */
+ bfd_reloc_outofrange,
+
+ /* Used by special functions. */
+ bfd_reloc_continue,
+
+ /* Unsupported relocation size requested. */
+ bfd_reloc_notsupported,
+
+ /* Unused. */
+ bfd_reloc_other,
+
+ /* The symbol to relocate against was undefined. */
+ bfd_reloc_undefined,
+
+ /* The relocation was performed, but may not be ok - presently
+ generated only when linking i960 coff files with i960 b.out
+ symbols. If this type is returned, the error_message argument
+ to bfd_perform_relocation will be set. */
+ bfd_reloc_dangerous
+ }
+ bfd_reloc_status_type;
+
+
+typedef struct reloc_cache_entry
+{
+ /* A pointer into the canonical table of pointers. */
+ struct bfd_symbol **sym_ptr_ptr;
+
+ /* offset in section. */
+ bfd_size_type address;
+
+ /* addend for relocation value. */
+ bfd_vma addend;
+
+ /* Pointer to how to perform the required relocation. */
+ reloc_howto_type *howto;
+
+}
+arelent;
+
+enum complain_overflow
+{
+ /* Do not complain on overflow. */
+ complain_overflow_dont,
+
+ /* Complain if the bitfield overflows, whether it is considered
+ as signed or unsigned. */
+ complain_overflow_bitfield,
+
+ /* Complain if the value overflows when considered as a signed
+ number. */
+ complain_overflow_signed,
+
+ /* Complain if the value overflows when considered as an
+ unsigned number. */
+ complain_overflow_unsigned
+};
+
+struct reloc_howto_struct
+{
+ /* The type field has mainly a documentary use - the back end can
+ do what it wants with it, though normally the back end's
+ external idea of a reloc number is stored
+ in this field. For example, a PC relative word relocation
+ in a coff environment has the type 023 - because that's
+ what the outside world calls an R_PCRWORD reloc. */
+ unsigned int type;
+
+ /* The value the final relocation is shifted right by. This drops
+ unwanted data from the relocation. */
+ unsigned int rightshift;
+
+ /* The size of the item to be relocated. This is *not* a
+ power-of-two measure. To get the number of bytes operated
+ on by a type of relocation, use bfd_get_reloc_size. */
+ int size;
+
+ /* The number of bits in the item to be relocated. This is used
+ when doing overflow checking. */
+ unsigned int bitsize;
+
+ /* Notes that the relocation is relative to the location in the
+ data section of the addend. The relocation function will
+ subtract from the relocation value the address of the location
+ being relocated. */
+ bfd_boolean pc_relative;
+
+ /* The bit position of the reloc value in the destination.
+ The relocated value is left shifted by this amount. */
+ unsigned int bitpos;
+
+ /* What type of overflow error should be checked for when
+ relocating. */
+ enum complain_overflow complain_on_overflow;
+
+ /* If this field is non null, then the supplied function is
+ called rather than the normal function. This allows really
+ strange relocation methods to be accommodated (e.g., i960 callj
+ instructions). */
+ bfd_reloc_status_type (*special_function)
+ (bfd *, arelent *, struct bfd_symbol *, void *, asection *,
+ bfd *, char **);
+
+ /* The textual name of the relocation type. */
+ char *name;
+
+ /* Some formats record a relocation addend in the section contents
+ rather than with the relocation. For ELF formats this is the
+ distinction between USE_REL and USE_RELA (though the code checks
+ for USE_REL == 1/0). The value of this field is TRUE if the
+ addend is recorded with the section contents; when performing a
+ partial link (ld -r) the section contents (the data) will be
+ modified. The value of this field is FALSE if addends are
+ recorded with the relocation (in arelent.addend); when performing
+ a partial link the relocation will be modified.
+ All relocations for all ELF USE_RELA targets should set this field
+ to FALSE (values of TRUE should be looked on with suspicion).
+ However, the converse is not true: not all relocations of all ELF
+ USE_REL targets set this field to TRUE. Why this is so is peculiar
+ to each particular target. For relocs that aren't used in partial
+ links (e.g. GOT stuff) it doesn't matter what this is set to. */
+ bfd_boolean partial_inplace;
+
+ /* src_mask selects the part of the instruction (or data) to be used
+ in the relocation sum. If the target relocations don't have an
+ addend in the reloc, e.g. ELF USE_REL, src_mask will normally equal
+ dst_mask to extract the addend from the section contents. If
+ relocations do have an addend in the reloc, e.g. ELF USE_RELA, this
+ field should be zero. Non-zero values for ELF USE_RELA targets are
+ bogus as in those cases the value in the dst_mask part of the
+ section contents should be treated as garbage. */
+ bfd_vma src_mask;
+
+ /* dst_mask selects which parts of the instruction (or data) are
+ replaced with a relocated value. */
+ bfd_vma dst_mask;
+
+ /* When some formats create PC relative instructions, they leave
+ the value of the pc of the place being relocated in the offset
+ slot of the instruction, so that a PC relative relocation can
+ be made just by adding in an ordinary offset (e.g., sun3 a.out).
+ Some formats leave the displacement part of an instruction
+ empty (e.g., m88k bcs); this flag signals the fact. */
+ bfd_boolean pcrel_offset;
+};
+
+#define HOWTO(C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC) \
+ { (unsigned) C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC }
+#define NEWHOWTO(FUNCTION, NAME, SIZE, REL, IN) \
+ HOWTO (0, 0, SIZE, 0, REL, 0, complain_overflow_dont, FUNCTION, \
+ NAME, FALSE, 0, 0, IN)
+
+#define EMPTY_HOWTO(C) \
+ HOWTO ((C), 0, 0, 0, FALSE, 0, complain_overflow_dont, NULL, \
+ NULL, FALSE, 0, 0, FALSE)
+
+#define HOWTO_PREPARE(relocation, symbol) \
+ { \
+ if (symbol != NULL) \
+ { \
+ if (bfd_is_com_section (symbol->section)) \
+ { \
+ relocation = 0; \
+ } \
+ else \
+ { \
+ relocation = symbol->value; \
+ } \
+ } \
+ }
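+
+/* Illustrative sketch (disabled): one entry of a back end's howto
+ table built with the HOWTO macro above - a plain 32-bit absolute
+ reloc, with each argument annotated with the field it fills. */
+#if 0
+static reloc_howto_type example_howto =
+  HOWTO (0,                     /* type */
+         0,                     /* rightshift */
+         2,                     /* size (4 bytes) */
+         32,                    /* bitsize */
+         FALSE,                 /* pc_relative */
+         0,                     /* bitpos */
+         complain_overflow_bitfield, /* complain_on_overflow */
+         NULL,                  /* special_function */
+         "EXAMPLE_32",          /* name */
+         FALSE,                 /* partial_inplace */
+         0,                     /* src_mask */
+         0xffffffff,            /* dst_mask */
+         FALSE);                /* pcrel_offset */
+#endif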
+
+unsigned int bfd_get_reloc_size (reloc_howto_type *);
+
+typedef struct relent_chain
+{
+ arelent relent;
+ struct relent_chain *next;
+}
+arelent_chain;
+
+bfd_reloc_status_type bfd_check_overflow
+ (enum complain_overflow how,
+ unsigned int bitsize,
+ unsigned int rightshift,
+ unsigned int addrsize,
+ bfd_vma relocation);
+
+bfd_reloc_status_type bfd_perform_relocation
+ (bfd *abfd,
+ arelent *reloc_entry,
+ void *data,
+ asection *input_section,
+ bfd *output_bfd,
+ char **error_message);
+
+bfd_reloc_status_type bfd_install_relocation
+ (bfd *abfd,
+ arelent *reloc_entry,
+ void *data, bfd_vma data_start,
+ asection *input_section,
+ char **error_message);
+
+enum bfd_reloc_code_real {
+ _dummy_first_bfd_reloc_code_real,
+
+
+/* Basic absolute relocations of N bits. */
+ BFD_RELOC_64,
+ BFD_RELOC_32,
+ BFD_RELOC_26,
+ BFD_RELOC_24,
+ BFD_RELOC_16,
+ BFD_RELOC_14,
+ BFD_RELOC_8,
+
+/* PC-relative relocations. Sometimes these are relative to the address
+of the relocation itself; sometimes they are relative to the start of
+the section containing the relocation. It depends on the specific target.
+
+The 24-bit relocation is used in some Intel 960 configurations. */
+ BFD_RELOC_64_PCREL,
+ BFD_RELOC_32_PCREL,
+ BFD_RELOC_24_PCREL,
+ BFD_RELOC_16_PCREL,
+ BFD_RELOC_12_PCREL,
+ BFD_RELOC_8_PCREL,
+
+/* Section relative relocations. Some targets need this for DWARF2. */
+ BFD_RELOC_32_SECREL,
+
+/* For ELF. */
+ BFD_RELOC_32_GOT_PCREL,
+ BFD_RELOC_16_GOT_PCREL,
+ BFD_RELOC_8_GOT_PCREL,
+ BFD_RELOC_32_GOTOFF,
+ BFD_RELOC_16_GOTOFF,
+ BFD_RELOC_LO16_GOTOFF,
+ BFD_RELOC_HI16_GOTOFF,
+ BFD_RELOC_HI16_S_GOTOFF,
+ BFD_RELOC_8_GOTOFF,
+ BFD_RELOC_64_PLT_PCREL,
+ BFD_RELOC_32_PLT_PCREL,
+ BFD_RELOC_24_PLT_PCREL,
+ BFD_RELOC_16_PLT_PCREL,
+ BFD_RELOC_8_PLT_PCREL,
+ BFD_RELOC_64_PLTOFF,
+ BFD_RELOC_32_PLTOFF,
+ BFD_RELOC_16_PLTOFF,
+ BFD_RELOC_LO16_PLTOFF,
+ BFD_RELOC_HI16_PLTOFF,
+ BFD_RELOC_HI16_S_PLTOFF,
+ BFD_RELOC_8_PLTOFF,
+
+/* Relocations used by 68K ELF. */
+ BFD_RELOC_68K_GLOB_DAT,
+ BFD_RELOC_68K_JMP_SLOT,
+ BFD_RELOC_68K_RELATIVE,
+
+/* Linkage-table relative. */
+ BFD_RELOC_32_BASEREL,
+ BFD_RELOC_16_BASEREL,
+ BFD_RELOC_LO16_BASEREL,
+ BFD_RELOC_HI16_BASEREL,
+ BFD_RELOC_HI16_S_BASEREL,
+ BFD_RELOC_8_BASEREL,
+ BFD_RELOC_RVA,
+
+/* Absolute 8-bit relocation, but used to form an address like 0xFFnn. */
+ BFD_RELOC_8_FFnn,
+
+/* These PC-relative relocations are stored as word displacements --
+i.e., byte displacements shifted right two bits. The 30-bit word
+displacement (<<32_PCREL_S2>> -- 32 bits, shifted 2) is used on the
+SPARC. (SPARC tools generally refer to this as <<WDISP30>>.) The
+signed 16-bit displacement is used on the MIPS, and the 23-bit
+displacement is used on the Alpha. */
+ BFD_RELOC_32_PCREL_S2,
+ BFD_RELOC_16_PCREL_S2,
+ BFD_RELOC_23_PCREL_S2,
+
+/* High 22 bits and low 10 bits of 32-bit value, placed into lower bits of
+the target word. These are used on the SPARC. */
+ BFD_RELOC_HI22,
+ BFD_RELOC_LO10,
+
+/* For systems that allocate a Global Pointer register, these are
+displacements off that register. These relocation types are
+handled specially, because the value the register will have is
+decided relatively late. */
+ BFD_RELOC_GPREL16,
+ BFD_RELOC_GPREL32,
+
+/* Reloc types used for i960/b.out. */
+ BFD_RELOC_I960_CALLJ,
+
+/* SPARC ELF relocations. There is probably some overlap with other
+relocation types already defined. */
+ BFD_RELOC_NONE,
+ BFD_RELOC_SPARC_WDISP22,
+ BFD_RELOC_SPARC22,
+ BFD_RELOC_SPARC13,
+ BFD_RELOC_SPARC_GOT10,
+ BFD_RELOC_SPARC_GOT13,
+ BFD_RELOC_SPARC_GOT22,
+ BFD_RELOC_SPARC_PC10,
+ BFD_RELOC_SPARC_PC22,
+ BFD_RELOC_SPARC_WPLT30,
+ BFD_RELOC_SPARC_COPY,
+ BFD_RELOC_SPARC_GLOB_DAT,
+ BFD_RELOC_SPARC_JMP_SLOT,
+ BFD_RELOC_SPARC_RELATIVE,
+ BFD_RELOC_SPARC_UA16,
+ BFD_RELOC_SPARC_UA32,
+ BFD_RELOC_SPARC_UA64,
+
+/* I think these are specific to SPARC a.out (e.g., Sun 4). */
+ BFD_RELOC_SPARC_BASE13,
+ BFD_RELOC_SPARC_BASE22,
+
+/* SPARC64 relocations */
+#define BFD_RELOC_SPARC_64 BFD_RELOC_64
+ BFD_RELOC_SPARC_10,
+ BFD_RELOC_SPARC_11,
+ BFD_RELOC_SPARC_OLO10,
+ BFD_RELOC_SPARC_HH22,
+ BFD_RELOC_SPARC_HM10,
+ BFD_RELOC_SPARC_LM22,
+ BFD_RELOC_SPARC_PC_HH22,
+ BFD_RELOC_SPARC_PC_HM10,
+ BFD_RELOC_SPARC_PC_LM22,
+ BFD_RELOC_SPARC_WDISP16,
+ BFD_RELOC_SPARC_WDISP19,
+ BFD_RELOC_SPARC_7,
+ BFD_RELOC_SPARC_6,
+ BFD_RELOC_SPARC_5,
+#define BFD_RELOC_SPARC_DISP64 BFD_RELOC_64_PCREL
+ BFD_RELOC_SPARC_PLT32,
+ BFD_RELOC_SPARC_PLT64,
+ BFD_RELOC_SPARC_HIX22,
+ BFD_RELOC_SPARC_LOX10,
+ BFD_RELOC_SPARC_H44,
+ BFD_RELOC_SPARC_M44,
+ BFD_RELOC_SPARC_L44,
+ BFD_RELOC_SPARC_REGISTER,
+
+/* SPARC little endian relocation */
+ BFD_RELOC_SPARC_REV32,
+
+/* SPARC TLS relocations */
+ BFD_RELOC_SPARC_TLS_GD_HI22,
+ BFD_RELOC_SPARC_TLS_GD_LO10,
+ BFD_RELOC_SPARC_TLS_GD_ADD,
+ BFD_RELOC_SPARC_TLS_GD_CALL,
+ BFD_RELOC_SPARC_TLS_LDM_HI22,
+ BFD_RELOC_SPARC_TLS_LDM_LO10,
+ BFD_RELOC_SPARC_TLS_LDM_ADD,
+ BFD_RELOC_SPARC_TLS_LDM_CALL,
+ BFD_RELOC_SPARC_TLS_LDO_HIX22,
+ BFD_RELOC_SPARC_TLS_LDO_LOX10,
+ BFD_RELOC_SPARC_TLS_LDO_ADD,
+ BFD_RELOC_SPARC_TLS_IE_HI22,
+ BFD_RELOC_SPARC_TLS_IE_LO10,
+ BFD_RELOC_SPARC_TLS_IE_LD,
+ BFD_RELOC_SPARC_TLS_IE_LDX,
+ BFD_RELOC_SPARC_TLS_IE_ADD,
+ BFD_RELOC_SPARC_TLS_LE_HIX22,
+ BFD_RELOC_SPARC_TLS_LE_LOX10,
+ BFD_RELOC_SPARC_TLS_DTPMOD32,
+ BFD_RELOC_SPARC_TLS_DTPMOD64,
+ BFD_RELOC_SPARC_TLS_DTPOFF32,
+ BFD_RELOC_SPARC_TLS_DTPOFF64,
+ BFD_RELOC_SPARC_TLS_TPOFF32,
+ BFD_RELOC_SPARC_TLS_TPOFF64,
+
+/* Alpha ECOFF and ELF relocations. Some of these treat the symbol or
+"addend" in some special way.
+For GPDISP_HI16 ("gpdisp") relocations, the symbol is ignored when
+writing; when reading, it will be the absolute section symbol. The
+addend is the displacement in bytes of the "lda" instruction from
+the "ldah" instruction (which is at the address of this reloc). */
+ BFD_RELOC_ALPHA_GPDISP_HI16,
+
+/* For GPDISP_LO16 ("ignore") relocations, the symbol is handled as
+with GPDISP_HI16 relocs. The addend is ignored when writing the
+relocations out, and is filled in with the file's GP value on
+reading, for convenience. */
+ BFD_RELOC_ALPHA_GPDISP_LO16,
+
+/* The ELF GPDISP relocation is exactly the same as the GPDISP_HI16
+relocation except that there is no accompanying GPDISP_LO16
+relocation. */
+ BFD_RELOC_ALPHA_GPDISP,
+
+/* The Alpha LITERAL/LITUSE relocs are produced by a symbol reference;
+the assembler turns it into a LDQ instruction to load the address of
+the symbol, and then fills in a register in the real instruction.
+
+The LITERAL reloc, at the LDQ instruction, refers to the .lita
+section symbol. The addend is ignored when writing, but is filled
+in with the file's GP value on reading, for convenience, as with the
+GPDISP_LO16 reloc.
+
+The ELF_LITERAL reloc is somewhere between 16_GOTOFF and GPDISP_LO16.
+It should refer to the symbol to be referenced, as with 16_GOTOFF,
+but it generates output not based on the position within the .got
+section, but relative to the GP value chosen for the file during the
+final link stage.
+
+The LITUSE reloc, on the instruction using the loaded address, gives
+information to the linker that it might be able to use to optimize
+away some literal section references. The symbol is ignored (read
+as the absolute section symbol), and the "addend" indicates the type
+of instruction using the register:
+1 - "memory" fmt insn
+2 - byte-manipulation (byte offset reg)
+3 - jsr (target of branch) */
+ BFD_RELOC_ALPHA_LITERAL,
+ BFD_RELOC_ALPHA_ELF_LITERAL,
+ BFD_RELOC_ALPHA_LITUSE,
+
+/* The HINT relocation indicates a value that should be filled into the
+"hint" field of a jmp/jsr/ret instruction, for possible branch-
+prediction logic which may be provided on some processors. */
+ BFD_RELOC_ALPHA_HINT,
+
+/* The LINKAGE relocation outputs a linkage pair in the object file,
+which is filled by the linker. */
+ BFD_RELOC_ALPHA_LINKAGE,
+
+/* The CODEADDR relocation outputs a STO_CA in the object file,
+which is filled by the linker. */
+ BFD_RELOC_ALPHA_CODEADDR,
+
+/* The GPREL_HI/LO relocations together form a 32-bit offset from the
+GP register. */
+ BFD_RELOC_ALPHA_GPREL_HI16,
+ BFD_RELOC_ALPHA_GPREL_LO16,
+
+/* Like BFD_RELOC_23_PCREL_S2, except that the source and target must
+share a common GP, and the target address is adjusted for
+STO_ALPHA_STD_GPLOAD. */
+ BFD_RELOC_ALPHA_BRSGP,
+
+/* Alpha thread-local storage relocations. */
+ BFD_RELOC_ALPHA_TLSGD,
+ BFD_RELOC_ALPHA_TLSLDM,
+ BFD_RELOC_ALPHA_DTPMOD64,
+ BFD_RELOC_ALPHA_GOTDTPREL16,
+ BFD_RELOC_ALPHA_DTPREL64,
+ BFD_RELOC_ALPHA_DTPREL_HI16,
+ BFD_RELOC_ALPHA_DTPREL_LO16,
+ BFD_RELOC_ALPHA_DTPREL16,
+ BFD_RELOC_ALPHA_GOTTPREL16,
+ BFD_RELOC_ALPHA_TPREL64,
+ BFD_RELOC_ALPHA_TPREL_HI16,
+ BFD_RELOC_ALPHA_TPREL_LO16,
+ BFD_RELOC_ALPHA_TPREL16,
+
+/* Bits 27..2 of the relocation address shifted right 2 bits;
+simple reloc otherwise. */
+ BFD_RELOC_MIPS_JMP,
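+
+/* Illustrative note: for the jump above, the 26-bit field is filled
+   with (target >> 2) & 0x03ffffff; the remaining upper address bits
+   come from the address of the instruction in the jump's delay slot. */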
+
+/* The MIPS16 jump instruction. */
+ BFD_RELOC_MIPS16_JMP,
+
+/* MIPS16 GP relative reloc. */
+ BFD_RELOC_MIPS16_GPREL,
+
+/* High 16 bits of 32-bit value; simple reloc. */
+ BFD_RELOC_HI16,
+
+/* High 16 bits of 32-bit value but the low 16 bits will be sign
+extended and added to form the final result. If the low 16
+bits form a negative number, we need to add one to the high value
+to compensate for the borrow when the low bits are added. */
+ BFD_RELOC_HI16_S,
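+
+/* A minimal sketch of the borrow compensation described above, for an
+   unsigned 32-bit value v:
+
+       lo = v & 0xffff;
+       hi = ((v >> 16) + ((lo & 0x8000) ? 1 : 0)) & 0xffff;
+
+   so that (hi << 16) plus the sign-extended lo reconstructs v. */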
+
+/* Low 16 bits. */
+ BFD_RELOC_LO16,
+
+/* High 16 bits of 32-bit pc-relative value */
+ BFD_RELOC_HI16_PCREL,
+
+/* High 16 bits of 32-bit pc-relative value, adjusted */
+ BFD_RELOC_HI16_S_PCREL,
+
+/* Low 16 bits of pc-relative value */
+ BFD_RELOC_LO16_PCREL,
+
+/* MIPS16 high 16 bits of 32-bit value. */
+ BFD_RELOC_MIPS16_HI16,
+
+/* MIPS16 high 16 bits of 32-bit value but the low 16 bits will be sign
+extended and added to form the final result. If the low 16
+bits form a negative number, we need to add one to the high value
+to compensate for the borrow when the low bits are added. */
+ BFD_RELOC_MIPS16_HI16_S,
+
+/* MIPS16 low 16 bits. */
+ BFD_RELOC_MIPS16_LO16,
+
+/* Relocation against a MIPS literal section. */
+ BFD_RELOC_MIPS_LITERAL,
+
+/* MIPS ELF relocations. */
+ BFD_RELOC_MIPS_GOT16,
+ BFD_RELOC_MIPS_CALL16,
+ BFD_RELOC_MIPS_GOT_HI16,
+ BFD_RELOC_MIPS_GOT_LO16,
+ BFD_RELOC_MIPS_CALL_HI16,
+ BFD_RELOC_MIPS_CALL_LO16,
+ BFD_RELOC_MIPS_SUB,
+ BFD_RELOC_MIPS_GOT_PAGE,
+ BFD_RELOC_MIPS_GOT_OFST,
+ BFD_RELOC_MIPS_GOT_DISP,
+ BFD_RELOC_MIPS_SHIFT5,
+ BFD_RELOC_MIPS_SHIFT6,
+ BFD_RELOC_MIPS_INSERT_A,
+ BFD_RELOC_MIPS_INSERT_B,
+ BFD_RELOC_MIPS_DELETE,
+ BFD_RELOC_MIPS_HIGHEST,
+ BFD_RELOC_MIPS_HIGHER,
+ BFD_RELOC_MIPS_SCN_DISP,
+ BFD_RELOC_MIPS_REL16,
+ BFD_RELOC_MIPS_RELGOT,
+ BFD_RELOC_MIPS_JALR,
+ BFD_RELOC_MIPS_TLS_DTPMOD32,
+ BFD_RELOC_MIPS_TLS_DTPREL32,
+ BFD_RELOC_MIPS_TLS_DTPMOD64,
+ BFD_RELOC_MIPS_TLS_DTPREL64,
+ BFD_RELOC_MIPS_TLS_GD,
+ BFD_RELOC_MIPS_TLS_LDM,
+ BFD_RELOC_MIPS_TLS_DTPREL_HI16,
+ BFD_RELOC_MIPS_TLS_DTPREL_LO16,
+ BFD_RELOC_MIPS_TLS_GOTTPREL,
+ BFD_RELOC_MIPS_TLS_TPREL32,
+ BFD_RELOC_MIPS_TLS_TPREL64,
+ BFD_RELOC_MIPS_TLS_TPREL_HI16,
+ BFD_RELOC_MIPS_TLS_TPREL_LO16,
+
+
+/* Fujitsu Frv Relocations. */
+ BFD_RELOC_FRV_LABEL16,
+ BFD_RELOC_FRV_LABEL24,
+ BFD_RELOC_FRV_LO16,
+ BFD_RELOC_FRV_HI16,
+ BFD_RELOC_FRV_GPREL12,
+ BFD_RELOC_FRV_GPRELU12,
+ BFD_RELOC_FRV_GPREL32,
+ BFD_RELOC_FRV_GPRELHI,
+ BFD_RELOC_FRV_GPRELLO,
+ BFD_RELOC_FRV_GOT12,
+ BFD_RELOC_FRV_GOTHI,
+ BFD_RELOC_FRV_GOTLO,
+ BFD_RELOC_FRV_FUNCDESC,
+ BFD_RELOC_FRV_FUNCDESC_GOT12,
+ BFD_RELOC_FRV_FUNCDESC_GOTHI,
+ BFD_RELOC_FRV_FUNCDESC_GOTLO,
+ BFD_RELOC_FRV_FUNCDESC_VALUE,
+ BFD_RELOC_FRV_FUNCDESC_GOTOFF12,
+ BFD_RELOC_FRV_FUNCDESC_GOTOFFHI,
+ BFD_RELOC_FRV_FUNCDESC_GOTOFFLO,
+ BFD_RELOC_FRV_GOTOFF12,
+ BFD_RELOC_FRV_GOTOFFHI,
+ BFD_RELOC_FRV_GOTOFFLO,
+ BFD_RELOC_FRV_GETTLSOFF,
+ BFD_RELOC_FRV_TLSDESC_VALUE,
+ BFD_RELOC_FRV_GOTTLSDESC12,
+ BFD_RELOC_FRV_GOTTLSDESCHI,
+ BFD_RELOC_FRV_GOTTLSDESCLO,
+ BFD_RELOC_FRV_TLSMOFF12,
+ BFD_RELOC_FRV_TLSMOFFHI,
+ BFD_RELOC_FRV_TLSMOFFLO,
+ BFD_RELOC_FRV_GOTTLSOFF12,
+ BFD_RELOC_FRV_GOTTLSOFFHI,
+ BFD_RELOC_FRV_GOTTLSOFFLO,
+ BFD_RELOC_FRV_TLSOFF,
+ BFD_RELOC_FRV_TLSDESC_RELAX,
+ BFD_RELOC_FRV_GETTLSOFF_RELAX,
+ BFD_RELOC_FRV_TLSOFF_RELAX,
+ BFD_RELOC_FRV_TLSMOFF,
+
+
+/* This is a 24bit GOT-relative reloc for the mn10300. */
+ BFD_RELOC_MN10300_GOTOFF24,
+
+/* This is a 32bit GOT-relative reloc for the mn10300, offset by two bytes
+in the instruction. */
+ BFD_RELOC_MN10300_GOT32,
+
+/* This is a 24bit GOT-relative reloc for the mn10300, offset by two bytes
+in the instruction. */
+ BFD_RELOC_MN10300_GOT24,
+
+/* This is a 16bit GOT-relative reloc for the mn10300, offset by two bytes
+in the instruction. */
+ BFD_RELOC_MN10300_GOT16,
+
+/* Copy symbol at runtime. */
+ BFD_RELOC_MN10300_COPY,
+
+/* Create GOT entry. */
+ BFD_RELOC_MN10300_GLOB_DAT,
+
+/* Create PLT entry. */
+ BFD_RELOC_MN10300_JMP_SLOT,
+
+/* Adjust by program base. */
+ BFD_RELOC_MN10300_RELATIVE,
+
+
+/* i386/elf relocations */
+ BFD_RELOC_386_GOT32,
+ BFD_RELOC_386_PLT32,
+ BFD_RELOC_386_COPY,
+ BFD_RELOC_386_GLOB_DAT,
+ BFD_RELOC_386_JUMP_SLOT,
+ BFD_RELOC_386_RELATIVE,
+ BFD_RELOC_386_GOTOFF,
+ BFD_RELOC_386_GOTPC,
+ BFD_RELOC_386_TLS_TPOFF,
+ BFD_RELOC_386_TLS_IE,
+ BFD_RELOC_386_TLS_GOTIE,
+ BFD_RELOC_386_TLS_LE,
+ BFD_RELOC_386_TLS_GD,
+ BFD_RELOC_386_TLS_LDM,
+ BFD_RELOC_386_TLS_LDO_32,
+ BFD_RELOC_386_TLS_IE_32,
+ BFD_RELOC_386_TLS_LE_32,
+ BFD_RELOC_386_TLS_DTPMOD32,
+ BFD_RELOC_386_TLS_DTPOFF32,
+ BFD_RELOC_386_TLS_TPOFF32,
+
+/* x86-64/elf relocations */
+ BFD_RELOC_X86_64_GOT32,
+ BFD_RELOC_X86_64_PLT32,
+ BFD_RELOC_X86_64_COPY,
+ BFD_RELOC_X86_64_GLOB_DAT,
+ BFD_RELOC_X86_64_JUMP_SLOT,
+ BFD_RELOC_X86_64_RELATIVE,
+ BFD_RELOC_X86_64_GOTPCREL,
+ BFD_RELOC_X86_64_32S,
+ BFD_RELOC_X86_64_DTPMOD64,
+ BFD_RELOC_X86_64_DTPOFF64,
+ BFD_RELOC_X86_64_TPOFF64,
+ BFD_RELOC_X86_64_TLSGD,
+ BFD_RELOC_X86_64_TLSLD,
+ BFD_RELOC_X86_64_DTPOFF32,
+ BFD_RELOC_X86_64_GOTTPOFF,
+ BFD_RELOC_X86_64_TPOFF32,
+ BFD_RELOC_X86_64_GOTOFF64,
+ BFD_RELOC_X86_64_GOTPC32,
+
+/* ns32k relocations */
+ BFD_RELOC_NS32K_IMM_8,
+ BFD_RELOC_NS32K_IMM_16,
+ BFD_RELOC_NS32K_IMM_32,
+ BFD_RELOC_NS32K_IMM_8_PCREL,
+ BFD_RELOC_NS32K_IMM_16_PCREL,
+ BFD_RELOC_NS32K_IMM_32_PCREL,
+ BFD_RELOC_NS32K_DISP_8,
+ BFD_RELOC_NS32K_DISP_16,
+ BFD_RELOC_NS32K_DISP_32,
+ BFD_RELOC_NS32K_DISP_8_PCREL,
+ BFD_RELOC_NS32K_DISP_16_PCREL,
+ BFD_RELOC_NS32K_DISP_32_PCREL,
+
+/* PDP11 relocations */
+ BFD_RELOC_PDP11_DISP_8_PCREL,
+ BFD_RELOC_PDP11_DISP_6_PCREL,
+
+/* Picojava relocs. Not all of these appear in object files. */
+ BFD_RELOC_PJ_CODE_HI16,
+ BFD_RELOC_PJ_CODE_LO16,
+ BFD_RELOC_PJ_CODE_DIR16,
+ BFD_RELOC_PJ_CODE_DIR32,
+ BFD_RELOC_PJ_CODE_REL16,
+ BFD_RELOC_PJ_CODE_REL32,
+
+/* Power(rs6000) and PowerPC relocations. */
+ BFD_RELOC_PPC_B26,
+ BFD_RELOC_PPC_BA26,
+ BFD_RELOC_PPC_TOC16,
+ BFD_RELOC_PPC_B16,
+ BFD_RELOC_PPC_B16_BRTAKEN,
+ BFD_RELOC_PPC_B16_BRNTAKEN,
+ BFD_RELOC_PPC_BA16,
+ BFD_RELOC_PPC_BA16_BRTAKEN,
+ BFD_RELOC_PPC_BA16_BRNTAKEN,
+ BFD_RELOC_PPC_COPY,
+ BFD_RELOC_PPC_GLOB_DAT,
+ BFD_RELOC_PPC_JMP_SLOT,
+ BFD_RELOC_PPC_RELATIVE,
+ BFD_RELOC_PPC_LOCAL24PC,
+ BFD_RELOC_PPC_EMB_NADDR32,
+ BFD_RELOC_PPC_EMB_NADDR16,
+ BFD_RELOC_PPC_EMB_NADDR16_LO,
+ BFD_RELOC_PPC_EMB_NADDR16_HI,
+ BFD_RELOC_PPC_EMB_NADDR16_HA,
+ BFD_RELOC_PPC_EMB_SDAI16,
+ BFD_RELOC_PPC_EMB_SDA2I16,
+ BFD_RELOC_PPC_EMB_SDA2REL,
+ BFD_RELOC_PPC_EMB_SDA21,
+ BFD_RELOC_PPC_EMB_MRKREF,
+ BFD_RELOC_PPC_EMB_RELSEC16,
+ BFD_RELOC_PPC_EMB_RELST_LO,
+ BFD_RELOC_PPC_EMB_RELST_HI,
+ BFD_RELOC_PPC_EMB_RELST_HA,
+ BFD_RELOC_PPC_EMB_BIT_FLD,
+ BFD_RELOC_PPC_EMB_RELSDA,
+ BFD_RELOC_PPC64_HIGHER,
+ BFD_RELOC_PPC64_HIGHER_S,
+ BFD_RELOC_PPC64_HIGHEST,
+ BFD_RELOC_PPC64_HIGHEST_S,
+ BFD_RELOC_PPC64_TOC16_LO,
+ BFD_RELOC_PPC64_TOC16_HI,
+ BFD_RELOC_PPC64_TOC16_HA,
+ BFD_RELOC_PPC64_TOC,
+ BFD_RELOC_PPC64_PLTGOT16,
+ BFD_RELOC_PPC64_PLTGOT16_LO,
+ BFD_RELOC_PPC64_PLTGOT16_HI,
+ BFD_RELOC_PPC64_PLTGOT16_HA,
+ BFD_RELOC_PPC64_ADDR16_DS,
+ BFD_RELOC_PPC64_ADDR16_LO_DS,
+ BFD_RELOC_PPC64_GOT16_DS,
+ BFD_RELOC_PPC64_GOT16_LO_DS,
+ BFD_RELOC_PPC64_PLT16_LO_DS,
+ BFD_RELOC_PPC64_SECTOFF_DS,
+ BFD_RELOC_PPC64_SECTOFF_LO_DS,
+ BFD_RELOC_PPC64_TOC16_DS,
+ BFD_RELOC_PPC64_TOC16_LO_DS,
+ BFD_RELOC_PPC64_PLTGOT16_DS,
+ BFD_RELOC_PPC64_PLTGOT16_LO_DS,
+
+/* PowerPC and PowerPC64 thread-local storage relocations. */
+ BFD_RELOC_PPC_TLS,
+ BFD_RELOC_PPC_DTPMOD,
+ BFD_RELOC_PPC_TPREL16,
+ BFD_RELOC_PPC_TPREL16_LO,
+ BFD_RELOC_PPC_TPREL16_HI,
+ BFD_RELOC_PPC_TPREL16_HA,
+ BFD_RELOC_PPC_TPREL,
+ BFD_RELOC_PPC_DTPREL16,
+ BFD_RELOC_PPC_DTPREL16_LO,
+ BFD_RELOC_PPC_DTPREL16_HI,
+ BFD_RELOC_PPC_DTPREL16_HA,
+ BFD_RELOC_PPC_DTPREL,
+ BFD_RELOC_PPC_GOT_TLSGD16,
+ BFD_RELOC_PPC_GOT_TLSGD16_LO,
+ BFD_RELOC_PPC_GOT_TLSGD16_HI,
+ BFD_RELOC_PPC_GOT_TLSGD16_HA,
+ BFD_RELOC_PPC_GOT_TLSLD16,
+ BFD_RELOC_PPC_GOT_TLSLD16_LO,
+ BFD_RELOC_PPC_GOT_TLSLD16_HI,
+ BFD_RELOC_PPC_GOT_TLSLD16_HA,
+ BFD_RELOC_PPC_GOT_TPREL16,
+ BFD_RELOC_PPC_GOT_TPREL16_LO,
+ BFD_RELOC_PPC_GOT_TPREL16_HI,
+ BFD_RELOC_PPC_GOT_TPREL16_HA,
+ BFD_RELOC_PPC_GOT_DTPREL16,
+ BFD_RELOC_PPC_GOT_DTPREL16_LO,
+ BFD_RELOC_PPC_GOT_DTPREL16_HI,
+ BFD_RELOC_PPC_GOT_DTPREL16_HA,
+ BFD_RELOC_PPC64_TPREL16_DS,
+ BFD_RELOC_PPC64_TPREL16_LO_DS,
+ BFD_RELOC_PPC64_TPREL16_HIGHER,
+ BFD_RELOC_PPC64_TPREL16_HIGHERA,
+ BFD_RELOC_PPC64_TPREL16_HIGHEST,
+ BFD_RELOC_PPC64_TPREL16_HIGHESTA,
+ BFD_RELOC_PPC64_DTPREL16_DS,
+ BFD_RELOC_PPC64_DTPREL16_LO_DS,
+ BFD_RELOC_PPC64_DTPREL16_HIGHER,
+ BFD_RELOC_PPC64_DTPREL16_HIGHERA,
+ BFD_RELOC_PPC64_DTPREL16_HIGHEST,
+ BFD_RELOC_PPC64_DTPREL16_HIGHESTA,
+
+/* IBM 370/390 relocations */
+ BFD_RELOC_I370_D12,
+
+/* The type of reloc used to build a constructor table - at the moment
+probably a 32 bit wide absolute relocation, but the target can choose.
+It generally does map to one of the other relocation types. */
+ BFD_RELOC_CTOR,
+
+/* ARM 26 bit pc-relative branch. The lowest two bits must be zero and are
+not stored in the instruction. */
+ BFD_RELOC_ARM_PCREL_BRANCH,
+
+/* ARM 26 bit pc-relative branch. The lowest bit must be zero and is
+not stored in the instruction. The 2nd lowest bit comes from a 1 bit
+field in the instruction. */
+ BFD_RELOC_ARM_PCREL_BLX,
+
+/* Thumb 22 bit pc-relative branch. The lowest bit must be zero and is
+not stored in the instruction. The 2nd lowest bit comes from a 1 bit
+field in the instruction. */
+ BFD_RELOC_THUMB_PCREL_BLX,
+
+/* Thumb 7-, 9-, 12-, 20-, 23-, and 25-bit pc-relative branches.
+The lowest bit must be zero and is not stored in the instruction.
+Note that the corresponding ELF R_ARM_THM_JUMPnn constant has an
+"nn" one smaller in all cases. Note further that BRANCH23
+corresponds to R_ARM_THM_CALL. */
+ BFD_RELOC_THUMB_PCREL_BRANCH7,
+ BFD_RELOC_THUMB_PCREL_BRANCH9,
+ BFD_RELOC_THUMB_PCREL_BRANCH12,
+ BFD_RELOC_THUMB_PCREL_BRANCH20,
+ BFD_RELOC_THUMB_PCREL_BRANCH23,
+ BFD_RELOC_THUMB_PCREL_BRANCH25,
+
+/* 12-bit immediate offset, used in ARM-format ldr and str instructions. */
+ BFD_RELOC_ARM_OFFSET_IMM,
+
+/* 5-bit immediate offset, used in Thumb-format ldr and str instructions. */
+ BFD_RELOC_ARM_THUMB_OFFSET,
+
+/* Pc-relative or absolute relocation depending on target. Used for
+entries in .init_array sections. */
+ BFD_RELOC_ARM_TARGET1,
+
+/* Read-only segment base relative address. */
+ BFD_RELOC_ARM_ROSEGREL32,
+
+/* Data segment base relative address. */
+ BFD_RELOC_ARM_SBREL32,
+
+/* This reloc is used for references to RTTI data from exception handling
+tables. The actual definition depends on the target. It may be a
+pc-relative or some form of GOT-indirect relocation. */
+ BFD_RELOC_ARM_TARGET2,
+
+/* 31-bit PC relative address. */
+ BFD_RELOC_ARM_PREL31,
+
+/* Relocations for setting up GOTs and PLTs for shared libraries. */
+ BFD_RELOC_ARM_JUMP_SLOT,
+ BFD_RELOC_ARM_GLOB_DAT,
+ BFD_RELOC_ARM_GOT32,
+ BFD_RELOC_ARM_PLT32,
+ BFD_RELOC_ARM_RELATIVE,
+ BFD_RELOC_ARM_GOTOFF,
+ BFD_RELOC_ARM_GOTPC,
+
+/* ARM thread-local storage relocations. */
+ BFD_RELOC_ARM_TLS_GD32,
+ BFD_RELOC_ARM_TLS_LDO32,
+ BFD_RELOC_ARM_TLS_LDM32,
+ BFD_RELOC_ARM_TLS_DTPOFF32,
+ BFD_RELOC_ARM_TLS_DTPMOD32,
+ BFD_RELOC_ARM_TLS_TPOFF32,
+ BFD_RELOC_ARM_TLS_IE32,
+ BFD_RELOC_ARM_TLS_LE32,
+
+/* These relocs are only used within the ARM assembler. They are not
+(at present) written to any object files. */
+ BFD_RELOC_ARM_IMMEDIATE,
+ BFD_RELOC_ARM_ADRL_IMMEDIATE,
+ BFD_RELOC_ARM_T32_IMMEDIATE,
+ BFD_RELOC_ARM_SHIFT_IMM,
+ BFD_RELOC_ARM_SMI,
+ BFD_RELOC_ARM_SWI,
+ BFD_RELOC_ARM_MULTI,
+ BFD_RELOC_ARM_CP_OFF_IMM,
+ BFD_RELOC_ARM_CP_OFF_IMM_S2,
+ BFD_RELOC_ARM_ADR_IMM,
+ BFD_RELOC_ARM_LDR_IMM,
+ BFD_RELOC_ARM_LITERAL,
+ BFD_RELOC_ARM_IN_POOL,
+ BFD_RELOC_ARM_OFFSET_IMM8,
+ BFD_RELOC_ARM_T32_OFFSET_U8,
+ BFD_RELOC_ARM_T32_OFFSET_IMM,
+ BFD_RELOC_ARM_HWLITERAL,
+ BFD_RELOC_ARM_THUMB_ADD,
+ BFD_RELOC_ARM_THUMB_IMM,
+ BFD_RELOC_ARM_THUMB_SHIFT,
+
+/* Renesas / SuperH SH relocs. Not all of these appear in object files. */
+ BFD_RELOC_SH_PCDISP8BY2,
+ BFD_RELOC_SH_PCDISP12BY2,
+ BFD_RELOC_SH_IMM3,
+ BFD_RELOC_SH_IMM3U,
+ BFD_RELOC_SH_DISP12,
+ BFD_RELOC_SH_DISP12BY2,
+ BFD_RELOC_SH_DISP12BY4,
+ BFD_RELOC_SH_DISP12BY8,
+ BFD_RELOC_SH_DISP20,
+ BFD_RELOC_SH_DISP20BY8,
+ BFD_RELOC_SH_IMM4,
+ BFD_RELOC_SH_IMM4BY2,
+ BFD_RELOC_SH_IMM4BY4,
+ BFD_RELOC_SH_IMM8,
+ BFD_RELOC_SH_IMM8BY2,
+ BFD_RELOC_SH_IMM8BY4,
+ BFD_RELOC_SH_PCRELIMM8BY2,
+ BFD_RELOC_SH_PCRELIMM8BY4,
+ BFD_RELOC_SH_SWITCH16,
+ BFD_RELOC_SH_SWITCH32,
+ BFD_RELOC_SH_USES,
+ BFD_RELOC_SH_COUNT,
+ BFD_RELOC_SH_ALIGN,
+ BFD_RELOC_SH_CODE,
+ BFD_RELOC_SH_DATA,
+ BFD_RELOC_SH_LABEL,
+ BFD_RELOC_SH_LOOP_START,
+ BFD_RELOC_SH_LOOP_END,
+ BFD_RELOC_SH_COPY,
+ BFD_RELOC_SH_GLOB_DAT,
+ BFD_RELOC_SH_JMP_SLOT,
+ BFD_RELOC_SH_RELATIVE,
+ BFD_RELOC_SH_GOTPC,
+ BFD_RELOC_SH_GOT_LOW16,
+ BFD_RELOC_SH_GOT_MEDLOW16,
+ BFD_RELOC_SH_GOT_MEDHI16,
+ BFD_RELOC_SH_GOT_HI16,
+ BFD_RELOC_SH_GOTPLT_LOW16,
+ BFD_RELOC_SH_GOTPLT_MEDLOW16,
+ BFD_RELOC_SH_GOTPLT_MEDHI16,
+ BFD_RELOC_SH_GOTPLT_HI16,
+ BFD_RELOC_SH_PLT_LOW16,
+ BFD_RELOC_SH_PLT_MEDLOW16,
+ BFD_RELOC_SH_PLT_MEDHI16,
+ BFD_RELOC_SH_PLT_HI16,
+ BFD_RELOC_SH_GOTOFF_LOW16,
+ BFD_RELOC_SH_GOTOFF_MEDLOW16,
+ BFD_RELOC_SH_GOTOFF_MEDHI16,
+ BFD_RELOC_SH_GOTOFF_HI16,
+ BFD_RELOC_SH_GOTPC_LOW16,
+ BFD_RELOC_SH_GOTPC_MEDLOW16,
+ BFD_RELOC_SH_GOTPC_MEDHI16,
+ BFD_RELOC_SH_GOTPC_HI16,
+ BFD_RELOC_SH_COPY64,
+ BFD_RELOC_SH_GLOB_DAT64,
+ BFD_RELOC_SH_JMP_SLOT64,
+ BFD_RELOC_SH_RELATIVE64,
+ BFD_RELOC_SH_GOT10BY4,
+ BFD_RELOC_SH_GOT10BY8,
+ BFD_RELOC_SH_GOTPLT10BY4,
+ BFD_RELOC_SH_GOTPLT10BY8,
+ BFD_RELOC_SH_GOTPLT32,
+ BFD_RELOC_SH_SHMEDIA_CODE,
+ BFD_RELOC_SH_IMMU5,
+ BFD_RELOC_SH_IMMS6,
+ BFD_RELOC_SH_IMMS6BY32,
+ BFD_RELOC_SH_IMMU6,
+ BFD_RELOC_SH_IMMS10,
+ BFD_RELOC_SH_IMMS10BY2,
+ BFD_RELOC_SH_IMMS10BY4,
+ BFD_RELOC_SH_IMMS10BY8,
+ BFD_RELOC_SH_IMMS16,
+ BFD_RELOC_SH_IMMU16,
+ BFD_RELOC_SH_IMM_LOW16,
+ BFD_RELOC_SH_IMM_LOW16_PCREL,
+ BFD_RELOC_SH_IMM_MEDLOW16,
+ BFD_RELOC_SH_IMM_MEDLOW16_PCREL,
+ BFD_RELOC_SH_IMM_MEDHI16,
+ BFD_RELOC_SH_IMM_MEDHI16_PCREL,
+ BFD_RELOC_SH_IMM_HI16,
+ BFD_RELOC_SH_IMM_HI16_PCREL,
+ BFD_RELOC_SH_PT_16,
+ BFD_RELOC_SH_TLS_GD_32,
+ BFD_RELOC_SH_TLS_LD_32,
+ BFD_RELOC_SH_TLS_LDO_32,
+ BFD_RELOC_SH_TLS_IE_32,
+ BFD_RELOC_SH_TLS_LE_32,
+ BFD_RELOC_SH_TLS_DTPMOD32,
+ BFD_RELOC_SH_TLS_DTPOFF32,
+ BFD_RELOC_SH_TLS_TPOFF32,
+
+/* ARC Cores relocs.
+ARC 22 bit pc-relative branch. The lowest two bits must be zero and are
+not stored in the instruction. The high 20 bits are installed in bits 26
+through 7 of the instruction. */
+ BFD_RELOC_ARC_B22_PCREL,
+
+/* ARC 26 bit absolute branch. The lowest two bits must be zero and are not
+stored in the instruction. The high 24 bits are installed in bits 23
+through 0. */
+ BFD_RELOC_ARC_B26,
+
+/* Mitsubishi D10V relocs.
+This is a 10-bit reloc with the right 2 bits
+assumed to be 0. */
+ BFD_RELOC_D10V_10_PCREL_R,
+
+/* Mitsubishi D10V relocs.
+This is a 10-bit reloc with the right 2 bits
+assumed to be 0. This is the same as the previous reloc
+except it is in the left container, i.e.,
+shifted left 15 bits. */
+ BFD_RELOC_D10V_10_PCREL_L,
+
+/* This is an 18-bit reloc with the right 2 bits
+assumed to be 0. */
+ BFD_RELOC_D10V_18,
+
+/* This is an 18-bit reloc with the right 2 bits
+assumed to be 0. */
+ BFD_RELOC_D10V_18_PCREL,
+
+/* Mitsubishi D30V relocs.
+This is a 6-bit absolute reloc. */
+ BFD_RELOC_D30V_6,
+
+/* This is a 6-bit pc-relative reloc with
+the right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_9_PCREL,
+
+/* This is a 6-bit pc-relative reloc with
+the right 3 bits assumed to be 0. Same
+as the previous reloc but on the right side
+of the container. */
+ BFD_RELOC_D30V_9_PCREL_R,
+
+/* This is a 12-bit absolute reloc with the
+right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_15,
+
+/* This is a 12-bit pc-relative reloc with
+the right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_15_PCREL,
+
+/* This is a 12-bit pc-relative reloc with
+the right 3 bits assumed to be 0. Same
+as the previous reloc but on the right side
+of the container. */
+ BFD_RELOC_D30V_15_PCREL_R,
+
+/* This is an 18-bit absolute reloc with
+the right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_21,
+
+/* This is an 18-bit pc-relative reloc with
+the right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_21_PCREL,
+
+/* This is an 18-bit pc-relative reloc with
+the right 3 bits assumed to be 0. Same
+as the previous reloc but on the right side
+of the container. */
+ BFD_RELOC_D30V_21_PCREL_R,
+
+/* This is a 32-bit absolute reloc. */
+ BFD_RELOC_D30V_32,
+
+/* This is a 32-bit pc-relative reloc. */
+ BFD_RELOC_D30V_32_PCREL,
+
+/* DLX relocs */
+ BFD_RELOC_DLX_HI16_S,
+
+/* DLX relocs */
+ BFD_RELOC_DLX_LO16,
+
+/* DLX relocs */
+ BFD_RELOC_DLX_JMP26,
+
+/* Renesas M16C/M32C Relocations. */
+ BFD_RELOC_M16C_8_PCREL8,
+ BFD_RELOC_M16C_16_PCREL8,
+ BFD_RELOC_M16C_8_PCREL16,
+ BFD_RELOC_M16C_8_ELABEL24,
+ BFD_RELOC_M16C_8_ABS16,
+ BFD_RELOC_M16C_16_ABS16,
+ BFD_RELOC_M16C_16_ABS24,
+ BFD_RELOC_M16C_16_ABS32,
+ BFD_RELOC_M16C_24_ABS16,
+ BFD_RELOC_M16C_24_ABS24,
+ BFD_RELOC_M16C_24_ABS32,
+ BFD_RELOC_M16C_32_ABS16,
+ BFD_RELOC_M16C_32_ABS24,
+ BFD_RELOC_M16C_32_ABS32,
+ BFD_RELOC_M16C_40_ABS16,
+ BFD_RELOC_M16C_40_ABS24,
+ BFD_RELOC_M16C_40_ABS32,
+
+/* Renesas M32R (formerly Mitsubishi M32R) relocs.
+This is a 24 bit absolute address. */
+ BFD_RELOC_M32R_24,
+
+/* This is a 10-bit pc-relative reloc with the right 2 bits assumed to be 0. */
+ BFD_RELOC_M32R_10_PCREL,
+
+/* This is an 18-bit reloc with the right 2 bits assumed to be 0. */
+ BFD_RELOC_M32R_18_PCREL,
+
+/* This is a 26-bit reloc with the right 2 bits assumed to be 0. */
+ BFD_RELOC_M32R_26_PCREL,
+
+/* This is a 16-bit reloc containing the high 16 bits of an address
+used when the lower 16 bits are treated as unsigned. */
+ BFD_RELOC_M32R_HI16_ULO,
+
+/* This is a 16-bit reloc containing the high 16 bits of an address
+used when the lower 16 bits are treated as signed. */
+ BFD_RELOC_M32R_HI16_SLO,
+
+/* This is a 16-bit reloc containing the lower 16 bits of an address. */
+ BFD_RELOC_M32R_LO16,
+
+/* This is a 16-bit reloc containing the small data area offset for use in
+add3, load, and store instructions. */
+ BFD_RELOC_M32R_SDA16,
+
+/* For PIC. */
+ BFD_RELOC_M32R_GOT24,
+ BFD_RELOC_M32R_26_PLTREL,
+ BFD_RELOC_M32R_COPY,
+ BFD_RELOC_M32R_GLOB_DAT,
+ BFD_RELOC_M32R_JMP_SLOT,
+ BFD_RELOC_M32R_RELATIVE,
+ BFD_RELOC_M32R_GOTOFF,
+ BFD_RELOC_M32R_GOTOFF_HI_ULO,
+ BFD_RELOC_M32R_GOTOFF_HI_SLO,
+ BFD_RELOC_M32R_GOTOFF_LO,
+ BFD_RELOC_M32R_GOTPC24,
+ BFD_RELOC_M32R_GOT16_HI_ULO,
+ BFD_RELOC_M32R_GOT16_HI_SLO,
+ BFD_RELOC_M32R_GOT16_LO,
+ BFD_RELOC_M32R_GOTPC_HI_ULO,
+ BFD_RELOC_M32R_GOTPC_HI_SLO,
+ BFD_RELOC_M32R_GOTPC_LO,
+
+/* This is a 9-bit reloc */
+ BFD_RELOC_V850_9_PCREL,
+
+/* This is a 22-bit reloc */
+ BFD_RELOC_V850_22_PCREL,
+
+/* This is a 16 bit offset from the short data area pointer. */
+ BFD_RELOC_V850_SDA_16_16_OFFSET,
+
+/* This is a 16 bit offset (of which only 15 bits are used) from the
+short data area pointer. */
+ BFD_RELOC_V850_SDA_15_16_OFFSET,
+
+/* This is a 16 bit offset from the zero data area pointer. */
+ BFD_RELOC_V850_ZDA_16_16_OFFSET,
+
+/* This is a 16 bit offset (of which only 15 bits are used) from the
+zero data area pointer. */
+ BFD_RELOC_V850_ZDA_15_16_OFFSET,
+
+/* This is an 8 bit offset (of which only 6 bits are used) from the
+tiny data area pointer. */
+ BFD_RELOC_V850_TDA_6_8_OFFSET,
+
+/* This is an 8bit offset (of which only 7 bits are used) from the tiny
+data area pointer. */
+ BFD_RELOC_V850_TDA_7_8_OFFSET,
+
+/* This is a 7 bit offset from the tiny data area pointer. */
+ BFD_RELOC_V850_TDA_7_7_OFFSET,
+
+/* This is a 16 bit offset from the tiny data area pointer. */
+ BFD_RELOC_V850_TDA_16_16_OFFSET,
+
+/* This is a 5 bit offset (of which only 4 bits are used) from the tiny
+data area pointer. */
+ BFD_RELOC_V850_TDA_4_5_OFFSET,
+
+/* This is a 4 bit offset from the tiny data area pointer. */
+ BFD_RELOC_V850_TDA_4_4_OFFSET,
+
+/* This is a 16 bit offset from the short data area pointer, with the
+bits placed non-contiguously in the instruction. */
+ BFD_RELOC_V850_SDA_16_16_SPLIT_OFFSET,
+
+/* This is a 16 bit offset from the zero data area pointer, with the
+bits placed non-contiguously in the instruction. */
+ BFD_RELOC_V850_ZDA_16_16_SPLIT_OFFSET,
+
+/* This is a 6 bit offset from the call table base pointer. */
+ BFD_RELOC_V850_CALLT_6_7_OFFSET,
+
+/* This is a 16 bit offset from the call table base pointer. */
+ BFD_RELOC_V850_CALLT_16_16_OFFSET,
+
+/* Used for relaxing indirect function calls. */
+ BFD_RELOC_V850_LONGCALL,
+
+/* Used for relaxing indirect jumps. */
+ BFD_RELOC_V850_LONGJUMP,
+
+/* Used to maintain alignment whilst relaxing. */
+ BFD_RELOC_V850_ALIGN,
+
+/* This is a variation of BFD_RELOC_LO16 that can be used in v850e ld.bu
+instructions. */
+ BFD_RELOC_V850_LO16_SPLIT_OFFSET,
+
+/* This is a 32bit pcrel reloc for the mn10300, offset by two bytes in the
+instruction. */
+ BFD_RELOC_MN10300_32_PCREL,
+
+/* This is a 16bit pcrel reloc for the mn10300, offset by two bytes in the
+instruction. */
+ BFD_RELOC_MN10300_16_PCREL,
+
+/* This is an 8bit DP reloc for the tms320c30, where the most
+significant 8 bits of a 24 bit word are placed into the least
+significant 8 bits of the opcode. */
+ BFD_RELOC_TIC30_LDP,
+
+/* This is a 7bit reloc for the tms320c54x, where the least
+significant 7 bits of a 16 bit word are placed into the least
+significant 7 bits of the opcode. */
+ BFD_RELOC_TIC54X_PARTLS7,
+
+/* This is a 9bit DP reloc for the tms320c54x, where the most
+significant 9 bits of a 16 bit word are placed into the least
+significant 9 bits of the opcode. */
+ BFD_RELOC_TIC54X_PARTMS9,
+
+/* This is an extended address 23-bit reloc for the tms320c54x. */
+ BFD_RELOC_TIC54X_23,
+
+/* This is a 16-bit reloc for the tms320c54x, where the least
+significant 16 bits of a 23-bit extended address are placed into
+the opcode. */
+ BFD_RELOC_TIC54X_16_OF_23,
+
+/* This is a reloc for the tms320c54x, where the most
+significant 7 bits of a 23-bit extended address are placed into
+the opcode. */
+ BFD_RELOC_TIC54X_MS7_OF_23,
+
+/* This is a 48 bit reloc for the FR30 that stores 32 bits. */
+ BFD_RELOC_FR30_48,
+
+/* This is a 32 bit reloc for the FR30 that stores 20 bits split up into
+two sections. */
+ BFD_RELOC_FR30_20,
+
+/* This is a 16 bit reloc for the FR30 that stores a 6 bit word offset in
+4 bits. */
+ BFD_RELOC_FR30_6_IN_4,
+
+/* This is a 16 bit reloc for the FR30 that stores an 8 bit byte offset
+into 8 bits. */
+ BFD_RELOC_FR30_8_IN_8,
+
+/* This is a 16 bit reloc for the FR30 that stores a 9 bit short offset
+into 8 bits. */
+ BFD_RELOC_FR30_9_IN_8,
+
+/* This is a 16 bit reloc for the FR30 that stores a 10 bit word offset
+into 8 bits. */
+ BFD_RELOC_FR30_10_IN_8,
+
+/* This is a 16 bit reloc for the FR30 that stores a 9 bit pc relative
+short offset into 8 bits. */
+ BFD_RELOC_FR30_9_PCREL,
+
+/* This is a 16 bit reloc for the FR30 that stores a 12 bit pc relative
+short offset into 11 bits. */
+ BFD_RELOC_FR30_12_PCREL,
+
+/* Motorola Mcore relocations. */
+ BFD_RELOC_MCORE_PCREL_IMM8BY4,
+ BFD_RELOC_MCORE_PCREL_IMM11BY2,
+ BFD_RELOC_MCORE_PCREL_IMM4BY2,
+ BFD_RELOC_MCORE_PCREL_32,
+ BFD_RELOC_MCORE_PCREL_JSR_IMM11BY2,
+ BFD_RELOC_MCORE_RVA,
+
+/* These are relocations for the GETA instruction. */
+ BFD_RELOC_MMIX_GETA,
+ BFD_RELOC_MMIX_GETA_1,
+ BFD_RELOC_MMIX_GETA_2,
+ BFD_RELOC_MMIX_GETA_3,
+
+/* These are relocations for a conditional branch instruction. */
+ BFD_RELOC_MMIX_CBRANCH,
+ BFD_RELOC_MMIX_CBRANCH_J,
+ BFD_RELOC_MMIX_CBRANCH_1,
+ BFD_RELOC_MMIX_CBRANCH_2,
+ BFD_RELOC_MMIX_CBRANCH_3,
+
+/* These are relocations for the PUSHJ instruction. */
+ BFD_RELOC_MMIX_PUSHJ,
+ BFD_RELOC_MMIX_PUSHJ_1,
+ BFD_RELOC_MMIX_PUSHJ_2,
+ BFD_RELOC_MMIX_PUSHJ_3,
+ BFD_RELOC_MMIX_PUSHJ_STUBBABLE,
+
+/* These are relocations for the JMP instruction. */
+ BFD_RELOC_MMIX_JMP,
+ BFD_RELOC_MMIX_JMP_1,
+ BFD_RELOC_MMIX_JMP_2,
+ BFD_RELOC_MMIX_JMP_3,
+
+/* This is a relocation for a relative address as in a GETA instruction or
+a branch. */
+ BFD_RELOC_MMIX_ADDR19,
+
+/* This is a relocation for a relative address as in a JMP instruction. */
+ BFD_RELOC_MMIX_ADDR27,
+
+/* This is a relocation for an instruction field that may be a general
+register or a value 0..255. */
+ BFD_RELOC_MMIX_REG_OR_BYTE,
+
+/* This is a relocation for an instruction field that may be a general
+register. */
+ BFD_RELOC_MMIX_REG,
+
+/* This is a relocation for two instruction fields holding a register and
+an offset, the equivalent of the relocation. */
+ BFD_RELOC_MMIX_BASE_PLUS_OFFSET,
+
+/* This relocation is an assertion that the expression is not allocated as
+a global register. It does not modify contents. */
+ BFD_RELOC_MMIX_LOCAL,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit pc relative
+short offset into 7 bits. */
+ BFD_RELOC_AVR_7_PCREL,
+
+/* This is a 16 bit reloc for the AVR that stores 13 bit pc relative
+short offset into 12 bits. */
+ BFD_RELOC_AVR_13_PCREL,
+
+/* This is a 16 bit reloc for the AVR that stores 17 bit value (usually
+program memory address) into 16 bits. */
+ BFD_RELOC_AVR_16_PM,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually
+data memory address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_LO8_LDI,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit
+of data memory address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_HI8_LDI,
+
+/* This is a 16 bit reloc for the AVR that stores an 8 bit value (the
+most significant 8 bits of a program memory address) into the 8 bit
+immediate value of an LDI insn. */
+ BFD_RELOC_AVR_HH8_LDI,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(usually data memory address) into 8 bit immediate value of SUBI insn. */
+ BFD_RELOC_AVR_LO8_LDI_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(high 8 bit of data memory address) into 8 bit immediate value of
+SUBI insn. */
+ BFD_RELOC_AVR_HI8_LDI_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores a negated 8 bit value
+(the most significant 8 bits of a program memory address) into the 8 bit
+immediate value of an LDI or SUBI insn. */
+ BFD_RELOC_AVR_HH8_LDI_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually
+command address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_LO8_LDI_PM,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit
+of command address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_HI8_LDI_PM,
+
+/* This is a 16 bit reloc for the AVR that stores an 8 bit value (the
+most significant 8 bits of a command address) into the 8 bit immediate
+value of an LDI insn. */
+ BFD_RELOC_AVR_HH8_LDI_PM,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(usually command address) into 8 bit immediate value of SUBI insn. */
+ BFD_RELOC_AVR_LO8_LDI_PM_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(high 8 bit of 16 bit command address) into 8 bit immediate value
+of SUBI insn. */
+ BFD_RELOC_AVR_HI8_LDI_PM_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(high 6 bit of 22 bit command address) into 8 bit immediate
+value of SUBI insn. */
+ BFD_RELOC_AVR_HH8_LDI_PM_NEG,
+
+/* This is a 32 bit reloc for the AVR that stores 23 bit value
+into 22 bits. */
+ BFD_RELOC_AVR_CALL,
+
+/* This is a 16 bit reloc for the AVR that stores all bits needed for
+absolute addressing with ldi; the overflow check is deferred to link time. */
+ BFD_RELOC_AVR_LDI,
+
+/* This is a 6 bit reloc for the AVR that stores offset for ldd/std
+instructions */
+ BFD_RELOC_AVR_6,
+
+/* This is a 6 bit reloc for the AVR that stores offset for adiw/sbiw
+instructions */
+ BFD_RELOC_AVR_6_ADIW,
+
+/* Direct 12 bit. */
+ BFD_RELOC_390_12,
+
+/* 12 bit GOT offset. */
+ BFD_RELOC_390_GOT12,
+
+/* 32 bit PC relative PLT address. */
+ BFD_RELOC_390_PLT32,
+
+/* Copy symbol at runtime. */
+ BFD_RELOC_390_COPY,
+
+/* Create GOT entry. */
+ BFD_RELOC_390_GLOB_DAT,
+
+/* Create PLT entry. */
+ BFD_RELOC_390_JMP_SLOT,
+
+/* Adjust by program base. */
+ BFD_RELOC_390_RELATIVE,
+
+/* 32 bit PC relative offset to GOT. */
+ BFD_RELOC_390_GOTPC,
+
+/* 16 bit GOT offset. */
+ BFD_RELOC_390_GOT16,
+
+/* PC relative 16 bit shifted by 1. */
+ BFD_RELOC_390_PC16DBL,
+
+/* 16 bit PC rel. PLT shifted by 1. */
+ BFD_RELOC_390_PLT16DBL,
+
+/* PC relative 32 bit shifted by 1. */
+ BFD_RELOC_390_PC32DBL,
+
+/* 32 bit PC rel. PLT shifted by 1. */
+ BFD_RELOC_390_PLT32DBL,
+
+/* 32 bit PC rel. GOT shifted by 1. */
+ BFD_RELOC_390_GOTPCDBL,
+
+/* 64 bit GOT offset. */
+ BFD_RELOC_390_GOT64,
+
+/* 64 bit PC relative PLT address. */
+ BFD_RELOC_390_PLT64,
+
+/* 32 bit rel. offset to GOT entry. */
+ BFD_RELOC_390_GOTENT,
+
+/* 64 bit offset to GOT. */
+ BFD_RELOC_390_GOTOFF64,
+
+/* 12-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLT12,
+
+/* 16-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLT16,
+
+/* 32-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLT32,
+
+/* 64-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLT64,
+
+/* 32-bit rel. offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLTENT,
+
+/* 16-bit rel. offset from the GOT to a PLT entry. */
+ BFD_RELOC_390_PLTOFF16,
+
+/* 32-bit rel. offset from the GOT to a PLT entry. */
+ BFD_RELOC_390_PLTOFF32,
+
+/* 64-bit rel. offset from the GOT to a PLT entry. */
+ BFD_RELOC_390_PLTOFF64,
+
+/* s390 tls relocations. */
+ BFD_RELOC_390_TLS_LOAD,
+ BFD_RELOC_390_TLS_GDCALL,
+ BFD_RELOC_390_TLS_LDCALL,
+ BFD_RELOC_390_TLS_GD32,
+ BFD_RELOC_390_TLS_GD64,
+ BFD_RELOC_390_TLS_GOTIE12,
+ BFD_RELOC_390_TLS_GOTIE32,
+ BFD_RELOC_390_TLS_GOTIE64,
+ BFD_RELOC_390_TLS_LDM32,
+ BFD_RELOC_390_TLS_LDM64,
+ BFD_RELOC_390_TLS_IE32,
+ BFD_RELOC_390_TLS_IE64,
+ BFD_RELOC_390_TLS_IEENT,
+ BFD_RELOC_390_TLS_LE32,
+ BFD_RELOC_390_TLS_LE64,
+ BFD_RELOC_390_TLS_LDO32,
+ BFD_RELOC_390_TLS_LDO64,
+ BFD_RELOC_390_TLS_DTPMOD,
+ BFD_RELOC_390_TLS_DTPOFF,
+ BFD_RELOC_390_TLS_TPOFF,
+
+/* Long displacement extension. */
+ BFD_RELOC_390_20,
+ BFD_RELOC_390_GOT20,
+ BFD_RELOC_390_GOTPLT20,
+ BFD_RELOC_390_TLS_GOTIE20,
+
+/* Scenix IP2K - 9-bit register number / data address */
+ BFD_RELOC_IP2K_FR9,
+
+/* Scenix IP2K - 4-bit register/data bank number */
+ BFD_RELOC_IP2K_BANK,
+
+/* Scenix IP2K - low 13 bits of instruction word address */
+ BFD_RELOC_IP2K_ADDR16CJP,
+
+/* Scenix IP2K - high 3 bits of instruction word address */
+ BFD_RELOC_IP2K_PAGE3,
+
+/* Scenix IP2K - ext/low/high 8 bits of data address */
+ BFD_RELOC_IP2K_LO8DATA,
+ BFD_RELOC_IP2K_HI8DATA,
+ BFD_RELOC_IP2K_EX8DATA,
+
+/* Scenix IP2K - low/high 8 bits of instruction word address */
+ BFD_RELOC_IP2K_LO8INSN,
+ BFD_RELOC_IP2K_HI8INSN,
+
+/* Scenix IP2K - even/odd PC modifier to modify snb pcl.0 */
+ BFD_RELOC_IP2K_PC_SKIP,
+
+/* Scenix IP2K - 16 bit word address in text section. */
+ BFD_RELOC_IP2K_TEXT,
+
+/* Scenix IP2K - 7-bit sp or dp offset */
+ BFD_RELOC_IP2K_FR_OFFSET,
+
+/* Scenix VPE4K coprocessor - data/insn-space addressing */
+ BFD_RELOC_VPE4KMATH_DATA,
+ BFD_RELOC_VPE4KMATH_INSN,
+
+/* These two relocations are used by the linker to determine which of
+the entries in a C++ virtual function table are actually used. When
+the --gc-sections option is given, the linker will zero out the entries
+that are not used, so that the code for those functions need not be
+included in the output.
+
+VTABLE_INHERIT is a zero-space relocation used to describe to the
+linker the inheritance tree of a C++ virtual function table. The
+relocation's symbol should be the parent class' vtable, and the
+relocation should be located at the child vtable.
+
+VTABLE_ENTRY is a zero-space relocation that describes the use of a
+virtual function table entry. The reloc's symbol should refer to the
+table of the class mentioned in the code. Off of that base, an offset
+describes the entry that is being used. For Rela hosts, this offset
+is stored in the reloc's addend. For Rel hosts, we are forced to put
+this offset in the reloc's section offset. */
+ BFD_RELOC_VTABLE_INHERIT,
+ BFD_RELOC_VTABLE_ENTRY,
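+
+/* An illustrative sketch (assumes GNU as, where these are emitted with
+   the .vtable_inherit and .vtable_entry pseudo-ops; the names and the
+   offset below are hypothetical):
+
+       .vtable_inherit Derived_vtable, Base_vtable
+       .vtable_entry   Derived_vtable, 8
+*/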
+
+/* Intel IA64 Relocations. */
+ BFD_RELOC_IA64_IMM14,
+ BFD_RELOC_IA64_IMM22,
+ BFD_RELOC_IA64_IMM64,
+ BFD_RELOC_IA64_DIR32MSB,
+ BFD_RELOC_IA64_DIR32LSB,
+ BFD_RELOC_IA64_DIR64MSB,
+ BFD_RELOC_IA64_DIR64LSB,
+ BFD_RELOC_IA64_GPREL22,
+ BFD_RELOC_IA64_GPREL64I,
+ BFD_RELOC_IA64_GPREL32MSB,
+ BFD_RELOC_IA64_GPREL32LSB,
+ BFD_RELOC_IA64_GPREL64MSB,
+ BFD_RELOC_IA64_GPREL64LSB,
+ BFD_RELOC_IA64_LTOFF22,
+ BFD_RELOC_IA64_LTOFF64I,
+ BFD_RELOC_IA64_PLTOFF22,
+ BFD_RELOC_IA64_PLTOFF64I,
+ BFD_RELOC_IA64_PLTOFF64MSB,
+ BFD_RELOC_IA64_PLTOFF64LSB,
+ BFD_RELOC_IA64_FPTR64I,
+ BFD_RELOC_IA64_FPTR32MSB,
+ BFD_RELOC_IA64_FPTR32LSB,
+ BFD_RELOC_IA64_FPTR64MSB,
+ BFD_RELOC_IA64_FPTR64LSB,
+ BFD_RELOC_IA64_PCREL21B,
+ BFD_RELOC_IA64_PCREL21BI,
+ BFD_RELOC_IA64_PCREL21M,
+ BFD_RELOC_IA64_PCREL21F,
+ BFD_RELOC_IA64_PCREL22,
+ BFD_RELOC_IA64_PCREL60B,
+ BFD_RELOC_IA64_PCREL64I,
+ BFD_RELOC_IA64_PCREL32MSB,
+ BFD_RELOC_IA64_PCREL32LSB,
+ BFD_RELOC_IA64_PCREL64MSB,
+ BFD_RELOC_IA64_PCREL64LSB,
+ BFD_RELOC_IA64_LTOFF_FPTR22,
+ BFD_RELOC_IA64_LTOFF_FPTR64I,
+ BFD_RELOC_IA64_LTOFF_FPTR32MSB,
+ BFD_RELOC_IA64_LTOFF_FPTR32LSB,
+ BFD_RELOC_IA64_LTOFF_FPTR64MSB,
+ BFD_RELOC_IA64_LTOFF_FPTR64LSB,
+ BFD_RELOC_IA64_SEGREL32MSB,
+ BFD_RELOC_IA64_SEGREL32LSB,
+ BFD_RELOC_IA64_SEGREL64MSB,
+ BFD_RELOC_IA64_SEGREL64LSB,
+ BFD_RELOC_IA64_SECREL32MSB,
+ BFD_RELOC_IA64_SECREL32LSB,
+ BFD_RELOC_IA64_SECREL64MSB,
+ BFD_RELOC_IA64_SECREL64LSB,
+ BFD_RELOC_IA64_REL32MSB,
+ BFD_RELOC_IA64_REL32LSB,
+ BFD_RELOC_IA64_REL64MSB,
+ BFD_RELOC_IA64_REL64LSB,
+ BFD_RELOC_IA64_LTV32MSB,
+ BFD_RELOC_IA64_LTV32LSB,
+ BFD_RELOC_IA64_LTV64MSB,
+ BFD_RELOC_IA64_LTV64LSB,
+ BFD_RELOC_IA64_IPLTMSB,
+ BFD_RELOC_IA64_IPLTLSB,
+ BFD_RELOC_IA64_COPY,
+ BFD_RELOC_IA64_LTOFF22X,
+ BFD_RELOC_IA64_LDXMOV,
+ BFD_RELOC_IA64_TPREL14,
+ BFD_RELOC_IA64_TPREL22,
+ BFD_RELOC_IA64_TPREL64I,
+ BFD_RELOC_IA64_TPREL64MSB,
+ BFD_RELOC_IA64_TPREL64LSB,
+ BFD_RELOC_IA64_LTOFF_TPREL22,
+ BFD_RELOC_IA64_DTPMOD64MSB,
+ BFD_RELOC_IA64_DTPMOD64LSB,
+ BFD_RELOC_IA64_LTOFF_DTPMOD22,
+ BFD_RELOC_IA64_DTPREL14,
+ BFD_RELOC_IA64_DTPREL22,
+ BFD_RELOC_IA64_DTPREL64I,
+ BFD_RELOC_IA64_DTPREL32MSB,
+ BFD_RELOC_IA64_DTPREL32LSB,
+ BFD_RELOC_IA64_DTPREL64MSB,
+ BFD_RELOC_IA64_DTPREL64LSB,
+ BFD_RELOC_IA64_LTOFF_DTPREL22,
+
+/* Motorola 68HC11 reloc.
+This is the 8 bit high part of an absolute address. */
+ BFD_RELOC_M68HC11_HI8,
+
+/* Motorola 68HC11 reloc.
+This is the 8 bit low part of an absolute address. */
+ BFD_RELOC_M68HC11_LO8,
+
+/* Motorola 68HC11 reloc.
+These are the 3 bits of a value. */
+ BFD_RELOC_M68HC11_3B,
+
+/* Motorola 68HC11 reloc.
+This reloc marks the beginning of a jump/call instruction.
+It is used for linker relaxation to correctly identify the beginning
+of an instruction and to change some branches to use the PC-relative
+addressing mode. */
+ BFD_RELOC_M68HC11_RL_JUMP,
+
+/* Motorola 68HC11 reloc.
+This reloc marks a group of several instructions generated by gcc, some
+of which the linker relaxation pass may modify or remove. */
+ BFD_RELOC_M68HC11_RL_GROUP,
+
+/* Motorola 68HC11 reloc.
+This is the 16-bit lower part of an address. It is used by the 'call'
+instruction to specify the symbol address without any special
+transformation (due to the memory bank window). */
+ BFD_RELOC_M68HC11_LO16,
+
+/* Motorola 68HC11 reloc.
+This is an 8-bit reloc that specifies the page number of an address.
+It is used by the 'call' instruction to specify the page number of
+the symbol. */
+ BFD_RELOC_M68HC11_PAGE,
+
+/* Motorola 68HC11 reloc.
+This is a 24-bit reloc that represents the address with a 16-bit
+value and an 8-bit page number. The symbol address is transformed
+to follow the 16K memory bank of the 68HC12 (seen as mapped in the window). */
+ BFD_RELOC_M68HC11_24,
+
+/* Motorola 68HC12 reloc.
+These are the 5 bits of a value. */
+ BFD_RELOC_M68HC12_5B,
+
+/* NS CR16C Relocations. */
+ BFD_RELOC_16C_NUM08,
+ BFD_RELOC_16C_NUM08_C,
+ BFD_RELOC_16C_NUM16,
+ BFD_RELOC_16C_NUM16_C,
+ BFD_RELOC_16C_NUM32,
+ BFD_RELOC_16C_NUM32_C,
+ BFD_RELOC_16C_DISP04,
+ BFD_RELOC_16C_DISP04_C,
+ BFD_RELOC_16C_DISP08,
+ BFD_RELOC_16C_DISP08_C,
+ BFD_RELOC_16C_DISP16,
+ BFD_RELOC_16C_DISP16_C,
+ BFD_RELOC_16C_DISP24,
+ BFD_RELOC_16C_DISP24_C,
+ BFD_RELOC_16C_DISP24a,
+ BFD_RELOC_16C_DISP24a_C,
+ BFD_RELOC_16C_REG04,
+ BFD_RELOC_16C_REG04_C,
+ BFD_RELOC_16C_REG04a,
+ BFD_RELOC_16C_REG04a_C,
+ BFD_RELOC_16C_REG14,
+ BFD_RELOC_16C_REG14_C,
+ BFD_RELOC_16C_REG16,
+ BFD_RELOC_16C_REG16_C,
+ BFD_RELOC_16C_REG20,
+ BFD_RELOC_16C_REG20_C,
+ BFD_RELOC_16C_ABS20,
+ BFD_RELOC_16C_ABS20_C,
+ BFD_RELOC_16C_ABS24,
+ BFD_RELOC_16C_ABS24_C,
+ BFD_RELOC_16C_IMM04,
+ BFD_RELOC_16C_IMM04_C,
+ BFD_RELOC_16C_IMM16,
+ BFD_RELOC_16C_IMM16_C,
+ BFD_RELOC_16C_IMM20,
+ BFD_RELOC_16C_IMM20_C,
+ BFD_RELOC_16C_IMM24,
+ BFD_RELOC_16C_IMM24_C,
+ BFD_RELOC_16C_IMM32,
+ BFD_RELOC_16C_IMM32_C,
+
+/* NS CRX Relocations. */
+ BFD_RELOC_CRX_REL4,
+ BFD_RELOC_CRX_REL8,
+ BFD_RELOC_CRX_REL8_CMP,
+ BFD_RELOC_CRX_REL16,
+ BFD_RELOC_CRX_REL24,
+ BFD_RELOC_CRX_REL32,
+ BFD_RELOC_CRX_REGREL12,
+ BFD_RELOC_CRX_REGREL22,
+ BFD_RELOC_CRX_REGREL28,
+ BFD_RELOC_CRX_REGREL32,
+ BFD_RELOC_CRX_ABS16,
+ BFD_RELOC_CRX_ABS32,
+ BFD_RELOC_CRX_NUM8,
+ BFD_RELOC_CRX_NUM16,
+ BFD_RELOC_CRX_NUM32,
+ BFD_RELOC_CRX_IMM16,
+ BFD_RELOC_CRX_IMM32,
+ BFD_RELOC_CRX_SWITCH8,
+ BFD_RELOC_CRX_SWITCH16,
+ BFD_RELOC_CRX_SWITCH32,
+
+/* These relocs are only used within the CRIS assembler. They are not
+(at present) written to any object files. */
+ BFD_RELOC_CRIS_BDISP8,
+ BFD_RELOC_CRIS_UNSIGNED_5,
+ BFD_RELOC_CRIS_SIGNED_6,
+ BFD_RELOC_CRIS_UNSIGNED_6,
+ BFD_RELOC_CRIS_SIGNED_8,
+ BFD_RELOC_CRIS_UNSIGNED_8,
+ BFD_RELOC_CRIS_SIGNED_16,
+ BFD_RELOC_CRIS_UNSIGNED_16,
+ BFD_RELOC_CRIS_LAPCQ_OFFSET,
+ BFD_RELOC_CRIS_UNSIGNED_4,
+
+/* Relocs used in ELF shared libraries for CRIS. */
+ BFD_RELOC_CRIS_COPY,
+ BFD_RELOC_CRIS_GLOB_DAT,
+ BFD_RELOC_CRIS_JUMP_SLOT,
+ BFD_RELOC_CRIS_RELATIVE,
+
+/* 32-bit offset to symbol-entry within GOT. */
+ BFD_RELOC_CRIS_32_GOT,
+
+/* 16-bit offset to symbol-entry within GOT. */
+ BFD_RELOC_CRIS_16_GOT,
+
+/* 32-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_CRIS_32_GOTPLT,
+
+/* 16-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_CRIS_16_GOTPLT,
+
+/* 32-bit offset to symbol, relative to GOT. */
+ BFD_RELOC_CRIS_32_GOTREL,
+
+/* 32-bit offset to symbol with PLT entry, relative to GOT. */
+ BFD_RELOC_CRIS_32_PLT_GOTREL,
+
+/* 32-bit offset to symbol with PLT entry, relative to this relocation. */
+ BFD_RELOC_CRIS_32_PLT_PCREL,
+
+/* Intel i860 Relocations. */
+ BFD_RELOC_860_COPY,
+ BFD_RELOC_860_GLOB_DAT,
+ BFD_RELOC_860_JUMP_SLOT,
+ BFD_RELOC_860_RELATIVE,
+ BFD_RELOC_860_PC26,
+ BFD_RELOC_860_PLT26,
+ BFD_RELOC_860_PC16,
+ BFD_RELOC_860_LOW0,
+ BFD_RELOC_860_SPLIT0,
+ BFD_RELOC_860_LOW1,
+ BFD_RELOC_860_SPLIT1,
+ BFD_RELOC_860_LOW2,
+ BFD_RELOC_860_SPLIT2,
+ BFD_RELOC_860_LOW3,
+ BFD_RELOC_860_LOGOT0,
+ BFD_RELOC_860_SPGOT0,
+ BFD_RELOC_860_LOGOT1,
+ BFD_RELOC_860_SPGOT1,
+ BFD_RELOC_860_LOGOTOFF0,
+ BFD_RELOC_860_SPGOTOFF0,
+ BFD_RELOC_860_LOGOTOFF1,
+ BFD_RELOC_860_SPGOTOFF1,
+ BFD_RELOC_860_LOGOTOFF2,
+ BFD_RELOC_860_LOGOTOFF3,
+ BFD_RELOC_860_LOPC,
+ BFD_RELOC_860_HIGHADJ,
+ BFD_RELOC_860_HAGOT,
+ BFD_RELOC_860_HAGOTOFF,
+ BFD_RELOC_860_HAPC,
+ BFD_RELOC_860_HIGH,
+ BFD_RELOC_860_HIGOT,
+ BFD_RELOC_860_HIGOTOFF,
+
+/* OpenRISC Relocations. */
+ BFD_RELOC_OPENRISC_ABS_26,
+ BFD_RELOC_OPENRISC_REL_26,
+
+/* H8 elf Relocations. */
+ BFD_RELOC_H8_DIR16A8,
+ BFD_RELOC_H8_DIR16R8,
+ BFD_RELOC_H8_DIR24A8,
+ BFD_RELOC_H8_DIR24R8,
+ BFD_RELOC_H8_DIR32A16,
+
+/* Sony Xstormy16 Relocations. */
+ BFD_RELOC_XSTORMY16_REL_12,
+ BFD_RELOC_XSTORMY16_12,
+ BFD_RELOC_XSTORMY16_24,
+ BFD_RELOC_XSTORMY16_FPTR16,
+
+/* Relocations used by VAX ELF. */
+ BFD_RELOC_VAX_GLOB_DAT,
+ BFD_RELOC_VAX_JMP_SLOT,
+ BFD_RELOC_VAX_RELATIVE,
+
+/* Morpho MS1 - 16 bit immediate relocation. */
+ BFD_RELOC_MS1_PC16,
+
+/* Morpho MS1 - Hi 16 bits of an address. */
+ BFD_RELOC_MS1_HI16,
+
+/* Morpho MS1 - Low 16 bits of an address. */
+ BFD_RELOC_MS1_LO16,
+
+/* Morpho MS1 - Used to tell the linker which vtable entries are used. */
+ BFD_RELOC_MS1_GNU_VTINHERIT,
+
+/* Morpho MS1 - Used to tell the linker which vtable entries are used. */
+ BFD_RELOC_MS1_GNU_VTENTRY,
+
+/* msp430 specific relocation codes */
+ BFD_RELOC_MSP430_10_PCREL,
+ BFD_RELOC_MSP430_16_PCREL,
+ BFD_RELOC_MSP430_16,
+ BFD_RELOC_MSP430_16_PCREL_BYTE,
+ BFD_RELOC_MSP430_16_BYTE,
+ BFD_RELOC_MSP430_2X_PCREL,
+ BFD_RELOC_MSP430_RL_PCREL,
+
+/* IQ2000 Relocations. */
+ BFD_RELOC_IQ2000_OFFSET_16,
+ BFD_RELOC_IQ2000_OFFSET_21,
+ BFD_RELOC_IQ2000_UHI16,
+
+/* Special Xtensa relocation used only by PLT entries in ELF shared
+objects to indicate that the runtime linker should set the value
+to one of its own internal functions or data structures. */
+ BFD_RELOC_XTENSA_RTLD,
+
+/* Xtensa relocations for ELF shared objects. */
+ BFD_RELOC_XTENSA_GLOB_DAT,
+ BFD_RELOC_XTENSA_JMP_SLOT,
+ BFD_RELOC_XTENSA_RELATIVE,
+
+/* Xtensa relocation used in ELF object files for symbols that may require
+PLT entries. Otherwise, this is just a generic 32-bit relocation. */
+ BFD_RELOC_XTENSA_PLT,
+
+/* Xtensa relocations to mark the difference of two local symbols.
+These are only needed to support linker relaxation and can be ignored
+when not relaxing. The field is set to the value of the difference
+assuming no relaxation. The relocation encodes the position of the
+first symbol so the linker can determine whether to adjust the field
+value. */
+ BFD_RELOC_XTENSA_DIFF8,
+ BFD_RELOC_XTENSA_DIFF16,
+ BFD_RELOC_XTENSA_DIFF32,
+
+/* Generic Xtensa relocations for instruction operands. Only the slot
+number is encoded in the relocation. The relocation applies to the
+last PC-relative immediate operand, or if there are no PC-relative
+immediates, to the last immediate operand. */
+ BFD_RELOC_XTENSA_SLOT0_OP,
+ BFD_RELOC_XTENSA_SLOT1_OP,
+ BFD_RELOC_XTENSA_SLOT2_OP,
+ BFD_RELOC_XTENSA_SLOT3_OP,
+ BFD_RELOC_XTENSA_SLOT4_OP,
+ BFD_RELOC_XTENSA_SLOT5_OP,
+ BFD_RELOC_XTENSA_SLOT6_OP,
+ BFD_RELOC_XTENSA_SLOT7_OP,
+ BFD_RELOC_XTENSA_SLOT8_OP,
+ BFD_RELOC_XTENSA_SLOT9_OP,
+ BFD_RELOC_XTENSA_SLOT10_OP,
+ BFD_RELOC_XTENSA_SLOT11_OP,
+ BFD_RELOC_XTENSA_SLOT12_OP,
+ BFD_RELOC_XTENSA_SLOT13_OP,
+ BFD_RELOC_XTENSA_SLOT14_OP,
+
+/* Alternate Xtensa relocations. Only the slot is encoded in the
+relocation. The meaning of these relocations is opcode-specific. */
+ BFD_RELOC_XTENSA_SLOT0_ALT,
+ BFD_RELOC_XTENSA_SLOT1_ALT,
+ BFD_RELOC_XTENSA_SLOT2_ALT,
+ BFD_RELOC_XTENSA_SLOT3_ALT,
+ BFD_RELOC_XTENSA_SLOT4_ALT,
+ BFD_RELOC_XTENSA_SLOT5_ALT,
+ BFD_RELOC_XTENSA_SLOT6_ALT,
+ BFD_RELOC_XTENSA_SLOT7_ALT,
+ BFD_RELOC_XTENSA_SLOT8_ALT,
+ BFD_RELOC_XTENSA_SLOT9_ALT,
+ BFD_RELOC_XTENSA_SLOT10_ALT,
+ BFD_RELOC_XTENSA_SLOT11_ALT,
+ BFD_RELOC_XTENSA_SLOT12_ALT,
+ BFD_RELOC_XTENSA_SLOT13_ALT,
+ BFD_RELOC_XTENSA_SLOT14_ALT,
+
+/* Xtensa relocations for backward compatibility. These have all been
+replaced by BFD_RELOC_XTENSA_SLOT0_OP. */
+ BFD_RELOC_XTENSA_OP0,
+ BFD_RELOC_XTENSA_OP1,
+ BFD_RELOC_XTENSA_OP2,
+
+/* Xtensa relocation to mark that the assembler expanded the
+instructions from an original target. The expansion size is
+encoded in the reloc size. */
+ BFD_RELOC_XTENSA_ASM_EXPAND,
+
+/* Xtensa relocation to mark that the linker should simplify
+assembler-expanded instructions. This is commonly used
+internally by the linker after analysis of a
+BFD_RELOC_XTENSA_ASM_EXPAND. */
+ BFD_RELOC_XTENSA_ASM_SIMPLIFY,
+ BFD_RELOC_UNUSED };
+typedef enum bfd_reloc_code_real bfd_reloc_code_real_type;
+reloc_howto_type *bfd_reloc_type_lookup
+ (bfd *abfd, bfd_reloc_code_real_type code);
+
+const char *bfd_get_reloc_code_name (bfd_reloc_code_real_type code);
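+
+/* Illustrative usage (a sketch; assumes abfd is an opened object bfd):
+
+       reloc_howto_type *howto = bfd_reloc_type_lookup (abfd, BFD_RELOC_32);
+       if (howto == NULL)
+         printf ("no howto for %s\n", bfd_get_reloc_code_name (BFD_RELOC_32));
+*/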
+
+/* Extracted from syms.c. */
+
+typedef struct bfd_symbol
+{
+ /* A pointer to the BFD which owns the symbol. This information
+ is necessary so that a back end can work out what additional
+ information (invisible to the application writer) is carried
+ with the symbol.
+
+ This field is *almost* redundant, since you can use section->owner
+ instead, except that some symbols point to the global sections
+ bfd_{abs,com,und}_section. This could be fixed by making
+ these globals be per-bfd (or per-target-flavor). FIXME. */
+ struct bfd *the_bfd; /* Use bfd_asymbol_bfd(sym) to access this field. */
+
+ /* The text of the symbol. The name is left alone, and not copied; the
+ application may not alter it. */
+ const char *name;
+
+ /* The value of the symbol. This really should be a union of a
+ numeric value with a pointer, since some flags indicate that
+ a pointer to another symbol is stored here. */
+ symvalue value;
+
+ /* Attributes of a symbol. */
+#define BSF_NO_FLAGS 0x00
+
+ /* The symbol has local scope; <<static>> in <<C>>. The value
+ is the offset into the section of the data. */
+#define BSF_LOCAL 0x01
+
+ /* The symbol has global scope; initialized data in <<C>>. The
+ value is the offset into the section of the data. */
+#define BSF_GLOBAL 0x02
+
+ /* The symbol has global scope and is exported. The value is
+ the offset into the section of the data. */
+#define BSF_EXPORT BSF_GLOBAL /* No real difference. */
+
+ /* A normal C symbol would be one of:
+ <<BSF_LOCAL>>, <<BSF_FORT_COMM>>, <<BSF_UNDEFINED>> or
+ <<BSF_GLOBAL>>. */
+
+ /* The symbol is a debugging record. The value has an arbitrary
+ meaning, unless BSF_DEBUGGING_RELOC is also set. */
+#define BSF_DEBUGGING 0x08
+
+ /* The symbol denotes a function entry point. Used in ELF,
+ perhaps others someday. */
+#define BSF_FUNCTION 0x10
+
+ /* Used by the linker. */
+#define BSF_KEEP 0x20
+#define BSF_KEEP_G 0x40
+
+ /* A weak global symbol, overridable without warnings by
+ a regular global symbol of the same name. */
+#define BSF_WEAK 0x80
+
+ /* This symbol was created to point to a section, e.g. ELF's
+ STT_SECTION symbols. */
+#define BSF_SECTION_SYM 0x100
+
+ /* The symbol used to be a common symbol, but now it is
+ allocated. */
+#define BSF_OLD_COMMON 0x200
+
+ /* The default value for common data. */
+#define BFD_FORT_COMM_DEFAULT_VALUE 0
+
+ /* In some files the type of a symbol sometimes alters its
+ location in an output file - i.e. in coff an <<ISFCN>> symbol
+ which is also a <<C_EXT>> symbol appears where it was
+ declared and not at the end of a section. This bit is set
+ by the target BFD part to convey this information. */
+#define BSF_NOT_AT_END 0x400
+
+ /* Signal that the symbol is the label of a constructor section. */
+#define BSF_CONSTRUCTOR 0x800
+
+ /* Signal that the symbol is a warning symbol. The name is a
+ warning. The name of the next symbol is the one to warn about;
+ if a reference is made to a symbol with the same name as the next
+ symbol, a warning is issued by the linker. */
+#define BSF_WARNING 0x1000
+
+ /* Signal that the symbol is indirect. This symbol is an indirect
+ pointer to the symbol with the same name as the next symbol. */
+#define BSF_INDIRECT 0x2000
+
+ /* BSF_FILE marks symbols that contain a file name. This is used
+ for ELF STT_FILE symbols. */
+#define BSF_FILE 0x4000
+
+ /* Symbol is from dynamic linking information. */
+#define BSF_DYNAMIC 0x8000
+
+ /* The symbol denotes a data object. Used in ELF, and perhaps
+ others someday. */
+#define BSF_OBJECT 0x10000
+
+ /* This symbol is a debugging symbol. The value is the offset
+ into the section of the data. BSF_DEBUGGING should be set
+ as well. */
+#define BSF_DEBUGGING_RELOC 0x20000
+
+ /* This symbol is thread local. Used in ELF. */
+#define BSF_THREAD_LOCAL 0x40000
+
+ flagword flags;
+
+ /* A pointer to the section to which this symbol is
+ relative. This will always be non-NULL; there are special
+ sections for undefined and absolute symbols. */
+ struct bfd_section *section;
+
+ /* Back end special data. */
+ union
+ {
+ void *p;
+ bfd_vma i;
+ }
+ udata;
+}
+asymbol;
+
+#define bfd_get_symtab_upper_bound(abfd) \
+ BFD_SEND (abfd, _bfd_get_symtab_upper_bound, (abfd))
+
+bfd_boolean bfd_is_local_label (bfd *abfd, asymbol *sym);
+
+bfd_boolean bfd_is_local_label_name (bfd *abfd, const char *name);
+
+#define bfd_is_local_label_name(abfd, name) \
+ BFD_SEND (abfd, _bfd_is_local_label_name, (abfd, name))
+
+bfd_boolean bfd_is_target_special_symbol (bfd *abfd, asymbol *sym);
+
+#define bfd_is_target_special_symbol(abfd, sym) \
+ BFD_SEND (abfd, _bfd_is_target_special_symbol, (abfd, sym))
+
+#define bfd_canonicalize_symtab(abfd, location) \
+ BFD_SEND (abfd, _bfd_canonicalize_symtab, (abfd, location))
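+
+/* Typical symbol-table read (an illustrative sketch; assumes abfd has
+   already passed bfd_check_format (abfd, bfd_object)):
+
+       long storage = bfd_get_symtab_upper_bound (abfd);
+       asymbol **syms = malloc (storage);
+       long symcount = bfd_canonicalize_symtab (abfd, syms);
+
+   on success syms holds symcount pointers followed by a NULL sentinel. */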
+
+bfd_boolean bfd_set_symtab
+ (bfd *abfd, asymbol **location, unsigned int count);
+
+void bfd_print_symbol_vandf (bfd *abfd, void *file, asymbol *symbol);
+
+#define bfd_make_empty_symbol(abfd) \
+ BFD_SEND (abfd, _bfd_make_empty_symbol, (abfd))
+
+asymbol *_bfd_generic_make_empty_symbol (bfd *);
+
+#define bfd_make_debug_symbol(abfd,ptr,size) \
+ BFD_SEND (abfd, _bfd_make_debug_symbol, (abfd, ptr, size))
+
+int bfd_decode_symclass (asymbol *symbol);
+
+bfd_boolean bfd_is_undefined_symclass (int symclass);
+
+void bfd_symbol_info (asymbol *symbol, symbol_info *ret);
+
+bfd_boolean bfd_copy_private_symbol_data
+ (bfd *ibfd, asymbol *isym, bfd *obfd, asymbol *osym);
+
+#define bfd_copy_private_symbol_data(ibfd, isymbol, obfd, osymbol) \
+ BFD_SEND (obfd, _bfd_copy_private_symbol_data, \
+ (ibfd, isymbol, obfd, osymbol))
+
+/* Extracted from bfd.c. */
+struct bfd
+{
+ /* A unique identifier of the BFD */
+ unsigned int id;
+
+ /* The filename the application opened the BFD with. */
+ const char *filename;
+
+ /* A pointer to the target jump table. */
+ const struct bfd_target *xvec;
+
+ /* The IOSTREAM, and corresponding IO vector that provide access
+ to the file backing the BFD. */
+ void *iostream;
+ const struct bfd_iovec *iovec;
+
+ /* Is the file descriptor being cached? That is, can it be closed as
+ needed, and re-opened when accessed later? */
+ bfd_boolean cacheable;
+
+ /* Marks whether there was a default target specified when the
+ BFD was opened. This is used to select which matching algorithm
+ to use to choose the back end. */
+ bfd_boolean target_defaulted;
+
+ /* The caching routines use these to maintain a
+ least-recently-used list of BFDs. */
+ struct bfd *lru_prev, *lru_next;
+
+ /* When a file is closed by the caching routines, BFD retains
+ state information on the file here... */
+ ufile_ptr where;
+
+ /* ... and here: (``once'' means at least once). */
+ bfd_boolean opened_once;
+
+ /* Set if we have a locally maintained mtime value, rather than
+ getting it from the file each time. */
+ bfd_boolean mtime_set;
+
+ /* File modified time, if mtime_set is TRUE. */
+ long mtime;
+
+ /* Reserved for an unimplemented file locking extension. */
+ int ifd;
+
+ /* The format which belongs to the BFD. (object, core, etc.) */
+ bfd_format format;
+
+ /* The direction with which the BFD was opened. */
+ enum bfd_direction
+ {
+ no_direction = 0,
+ read_direction = 1,
+ write_direction = 2,
+ both_direction = 3
+ }
+ direction;
+
+ /* Format-specific flags. */
+ flagword flags;
+
+ /* Currently my_archive is tested before adding origin to
+ anything. I believe that this can always become an add of
+ origin, with origin set to 0 for non-archive files. */
+ ufile_ptr origin;
+
+ /* Remember when output has begun, to stop strange things
+ from happening. */
+ bfd_boolean output_has_begun;
+
+ /* A hash table for section names. */
+ struct bfd_hash_table section_htab;
+
+ /* Pointer to linked list of sections. */
+ struct bfd_section *sections;
+
+ /* The last section on the section list. */
+ struct bfd_section *section_last;
+
+ /* The number of sections. */
+ unsigned int section_count;
+
+ /* Stuff only useful for object files:
+ The start address. */
+ bfd_vma start_address;
+
+ /* Used for input and output. */
+ unsigned int symcount;
+
+ /* Symbol table for output BFD (with symcount entries). */
+ struct bfd_symbol **outsymbols;
+
+ /* Used for slurped dynamic symbol tables. */
+ unsigned int dynsymcount;
+
+ /* Pointer to structure which contains architecture information. */
+ const struct bfd_arch_info *arch_info;
+
+ /* Flag set if symbols from this BFD should not be exported. */
+ bfd_boolean no_export;
+
+ /* Stuff only useful for archives. */
+ void *arelt_data;
+ struct bfd *my_archive; /* The containing archive BFD. */
+ struct bfd *next; /* The next BFD in the archive. */
+ struct bfd *archive_head; /* The first BFD in the archive. */
+ bfd_boolean has_armap;
+
+ /* A chain of BFD structures involved in a link. */
+ struct bfd *link_next;
+
+ /* A field used by _bfd_generic_link_add_archive_symbols. This will
+ be used only for archive elements. */
+ int archive_pass;
+
+ /* Used by the back end to hold private data. */
+ union
+ {
+ struct aout_data_struct *aout_data;
+ struct artdata *aout_ar_data;
+ struct _oasys_data *oasys_obj_data;
+ struct _oasys_ar_data *oasys_ar_data;
+ struct coff_tdata *coff_obj_data;
+ struct pe_tdata *pe_obj_data;
+ struct xcoff_tdata *xcoff_obj_data;
+ struct ecoff_tdata *ecoff_obj_data;
+ struct ieee_data_struct *ieee_data;
+ struct ieee_ar_data_struct *ieee_ar_data;
+ struct srec_data_struct *srec_data;
+ struct ihex_data_struct *ihex_data;
+ struct tekhex_data_struct *tekhex_data;
+ struct elf_obj_tdata *elf_obj_data;
+ struct nlm_obj_tdata *nlm_obj_data;
+ struct bout_data_struct *bout_data;
+ struct mmo_data_struct *mmo_data;
+ struct sun_core_struct *sun_core_data;
+ struct sco5_core_struct *sco5_core_data;
+ struct trad_core_struct *trad_core_data;
+ struct som_data_struct *som_data;
+ struct hpux_core_struct *hpux_core_data;
+ struct hppabsd_core_struct *hppabsd_core_data;
+ struct sgi_core_struct *sgi_core_data;
+ struct lynx_core_struct *lynx_core_data;
+ struct osf_core_struct *osf_core_data;
+ struct cisco_core_struct *cisco_core_data;
+ struct versados_data_struct *versados_data;
+ struct netbsd_core_struct *netbsd_core_data;
+ struct mach_o_data_struct *mach_o_data;
+ struct mach_o_fat_data_struct *mach_o_fat_data;
+ struct bfd_pef_data_struct *pef_data;
+ struct bfd_pef_xlib_data_struct *pef_xlib_data;
+ struct bfd_sym_data_struct *sym_data;
+ void *any;
+ }
+ tdata;
+
+ /* Used by the application to hold private data. */
+ void *usrdata;
+
+ /* Where all the allocated stuff under this BFD goes. This is a
+ struct objalloc *, but we use void * to avoid requiring the inclusion
+ of objalloc.h. */
+ void *memory;
+};
+
+typedef enum bfd_error
+{
+ bfd_error_no_error = 0,
+ bfd_error_system_call,
+ bfd_error_invalid_target,
+ bfd_error_wrong_format,
+ bfd_error_wrong_object_format,
+ bfd_error_invalid_operation,
+ bfd_error_no_memory,
+ bfd_error_no_symbols,
+ bfd_error_no_armap,
+ bfd_error_no_more_archived_files,
+ bfd_error_malformed_archive,
+ bfd_error_file_not_recognized,
+ bfd_error_file_ambiguously_recognized,
+ bfd_error_no_contents,
+ bfd_error_nonrepresentable_section,
+ bfd_error_no_debug_section,
+ bfd_error_bad_value,
+ bfd_error_file_truncated,
+ bfd_error_file_too_big,
+ bfd_error_invalid_error_code
+}
+bfd_error_type;
+
+bfd_error_type bfd_get_error (void);
+
+void bfd_set_error (bfd_error_type error_tag);
+
+const char *bfd_errmsg (bfd_error_type error_tag);
+
+void bfd_perror (const char *message);
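+
+/* Illustrative error check (a sketch; some_bfd_call stands for any bfd
+   call that reports failure through bfd_set_error):
+
+       if (!some_bfd_call (abfd))
+         fprintf (stderr, "bfd: %s\n", bfd_errmsg (bfd_get_error ()));
+*/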
+
+typedef void (*bfd_error_handler_type) (const char *, ...);
+
+bfd_error_handler_type bfd_set_error_handler (bfd_error_handler_type);
+
+void bfd_set_error_program_name (const char *);
+
+bfd_error_handler_type bfd_get_error_handler (void);
+
+long bfd_get_reloc_upper_bound (bfd *abfd, asection *sect);
+
+long bfd_canonicalize_reloc
+ (bfd *abfd, asection *sec, arelent **loc, asymbol **syms);
+
+void bfd_set_reloc
+ (bfd *abfd, asection *sec, arelent **rel, unsigned int count);
+
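+/* Sketch of the two-step upper-bound/canonicalize idiom above
+   (illustrative only; `abfd', `sect' and the canonicalized symbol
+   table `syms' are assumed to exist, and malloc stands in for the
+   caller's allocator):
+
+     long size = bfd_get_reloc_upper_bound (abfd, sect);
+     if (size > 0)
+       {
+         arelent **relocs = malloc (size);
+         long count = bfd_canonicalize_reloc (abfd, sect, relocs, syms);
+         if (count < 0)
+           bfd_perror ("canonicalize_reloc");
+       }
+*/
+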
+bfd_boolean bfd_set_file_flags (bfd *abfd, flagword flags);
+
+int bfd_get_arch_size (bfd *abfd);
+
+int bfd_get_sign_extend_vma (bfd *abfd);
+
+bfd_boolean bfd_set_start_address (bfd *abfd, bfd_vma vma);
+
+unsigned int bfd_get_gp_size (bfd *abfd);
+
+void bfd_set_gp_size (bfd *abfd, unsigned int i);
+
+bfd_vma bfd_scan_vma (const char *string, const char **end, int base);
+
+bfd_boolean bfd_copy_private_header_data (bfd *ibfd, bfd *obfd);
+
+#define bfd_copy_private_header_data(ibfd, obfd) \
+ BFD_SEND (obfd, _bfd_copy_private_header_data, \
+ (ibfd, obfd))
+bfd_boolean bfd_copy_private_bfd_data (bfd *ibfd, bfd *obfd);
+
+#define bfd_copy_private_bfd_data(ibfd, obfd) \
+ BFD_SEND (obfd, _bfd_copy_private_bfd_data, \
+ (ibfd, obfd))
+bfd_boolean bfd_merge_private_bfd_data (bfd *ibfd, bfd *obfd);
+
+#define bfd_merge_private_bfd_data(ibfd, obfd) \
+ BFD_SEND (obfd, _bfd_merge_private_bfd_data, \
+ (ibfd, obfd))
+bfd_boolean bfd_set_private_flags (bfd *abfd, flagword flags);
+
+#define bfd_set_private_flags(abfd, flags) \
+ BFD_SEND (abfd, _bfd_set_private_flags, (abfd, flags))
+#define bfd_sizeof_headers(abfd, reloc) \
+ BFD_SEND (abfd, _bfd_sizeof_headers, (abfd, reloc))
+
+#define bfd_find_nearest_line(abfd, sec, syms, off, file, func, line) \
+ BFD_SEND (abfd, _bfd_find_nearest_line, \
+ (abfd, sec, syms, off, file, func, line))
+
+#define bfd_find_line(abfd, syms, sym, file, line) \
+ BFD_SEND (abfd, _bfd_find_line, \
+ (abfd, syms, sym, file, line))
+
+#define bfd_find_inliner_info(abfd, file, func, line) \
+ BFD_SEND (abfd, _bfd_find_inliner_info, \
+ (abfd, file, func, line))
+
+#define bfd_debug_info_start(abfd) \
+ BFD_SEND (abfd, _bfd_debug_info_start, (abfd))
+
+#define bfd_debug_info_end(abfd) \
+ BFD_SEND (abfd, _bfd_debug_info_end, (abfd))
+
+#define bfd_debug_info_accumulate(abfd, section) \
+ BFD_SEND (abfd, _bfd_debug_info_accumulate, (abfd, section))
+
+#define bfd_stat_arch_elt(abfd, stat) \
+ BFD_SEND (abfd, _bfd_stat_arch_elt,(abfd, stat))
+
+#define bfd_update_armap_timestamp(abfd) \
+ BFD_SEND (abfd, _bfd_update_armap_timestamp, (abfd))
+
+#define bfd_set_arch_mach(abfd, arch, mach)\
+ BFD_SEND ( abfd, _bfd_set_arch_mach, (abfd, arch, mach))
+
+#define bfd_relax_section(abfd, section, link_info, again) \
+ BFD_SEND (abfd, _bfd_relax_section, (abfd, section, link_info, again))
+
+#define bfd_gc_sections(abfd, link_info) \
+ BFD_SEND (abfd, _bfd_gc_sections, (abfd, link_info))
+
+#define bfd_merge_sections(abfd, link_info) \
+ BFD_SEND (abfd, _bfd_merge_sections, (abfd, link_info))
+
+#define bfd_is_group_section(abfd, sec) \
+ BFD_SEND (abfd, _bfd_is_group_section, (abfd, sec))
+
+#define bfd_discard_group(abfd, sec) \
+ BFD_SEND (abfd, _bfd_discard_group, (abfd, sec))
+
+#define bfd_link_hash_table_create(abfd) \
+ BFD_SEND (abfd, _bfd_link_hash_table_create, (abfd))
+
+#define bfd_link_hash_table_free(abfd, hash) \
+ BFD_SEND (abfd, _bfd_link_hash_table_free, (hash))
+
+#define bfd_link_add_symbols(abfd, info) \
+ BFD_SEND (abfd, _bfd_link_add_symbols, (abfd, info))
+
+#define bfd_link_just_syms(abfd, sec, info) \
+ BFD_SEND (abfd, _bfd_link_just_syms, (sec, info))
+
+#define bfd_final_link(abfd, info) \
+ BFD_SEND (abfd, _bfd_final_link, (abfd, info))
+
+#define bfd_free_cached_info(abfd) \
+ BFD_SEND (abfd, _bfd_free_cached_info, (abfd))
+
+#define bfd_get_dynamic_symtab_upper_bound(abfd) \
+ BFD_SEND (abfd, _bfd_get_dynamic_symtab_upper_bound, (abfd))
+
+#define bfd_print_private_bfd_data(abfd, file)\
+ BFD_SEND (abfd, _bfd_print_private_bfd_data, (abfd, file))
+
+#define bfd_canonicalize_dynamic_symtab(abfd, asymbols) \
+ BFD_SEND (abfd, _bfd_canonicalize_dynamic_symtab, (abfd, asymbols))
+
+#define bfd_get_synthetic_symtab(abfd, count, syms, dyncount, dynsyms, ret) \
+ BFD_SEND (abfd, _bfd_get_synthetic_symtab, (abfd, count, syms, \
+ dyncount, dynsyms, ret))
+
+#define bfd_get_dynamic_reloc_upper_bound(abfd) \
+ BFD_SEND (abfd, _bfd_get_dynamic_reloc_upper_bound, (abfd))
+
+#define bfd_canonicalize_dynamic_reloc(abfd, arels, asyms) \
+ BFD_SEND (abfd, _bfd_canonicalize_dynamic_reloc, (abfd, arels, asyms))
+
+extern bfd_byte *bfd_get_relocated_section_contents
+ (bfd *, struct bfd_link_info *, struct bfd_link_order *, bfd_byte *,
+ bfd_boolean, asymbol **);
+
+bfd_boolean bfd_alt_mach_code (bfd *abfd, int alternative);
+
+struct bfd_preserve
+{
+ void *marker;
+ void *tdata;
+ flagword flags;
+ const struct bfd_arch_info *arch_info;
+ struct bfd_section *sections;
+ struct bfd_section *section_last;
+ unsigned int section_count;
+ struct bfd_hash_table section_htab;
+};
+
+bfd_boolean bfd_preserve_save (bfd *, struct bfd_preserve *);
+
+void bfd_preserve_restore (bfd *, struct bfd_preserve *);
+
+void bfd_preserve_finish (bfd *, struct bfd_preserve *);
+
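+/* Sketch of the intended save/probe/restore pattern (illustrative only;
+   `try_format' stands for a hypothetical format probe):
+
+     struct bfd_preserve preserve;
+     if (bfd_preserve_save (abfd, &preserve))
+       {
+         if (try_format (abfd))
+           bfd_preserve_finish (abfd, &preserve);
+         else
+           bfd_preserve_restore (abfd, &preserve);
+       }
+*/
+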
+/* Extracted from archive.c. */
+symindex bfd_get_next_mapent
+ (bfd *abfd, symindex previous, carsym **sym);
+
+bfd_boolean bfd_set_archive_head (bfd *output, bfd *new_head);
+
+bfd *bfd_openr_next_archived_file (bfd *archive, bfd *previous);
+
+/* Extracted from corefile.c. */
+const char *bfd_core_file_failing_command (bfd *abfd);
+
+int bfd_core_file_failing_signal (bfd *abfd);
+
+bfd_boolean core_file_matches_executable_p
+ (bfd *core_bfd, bfd *exec_bfd);
+
+/* Extracted from targets.c. */
+#define BFD_SEND(bfd, message, arglist) \
+ ((*((bfd)->xvec->message)) arglist)
+
+#ifdef DEBUG_BFD_SEND
+#undef BFD_SEND
+#define BFD_SEND(bfd, message, arglist) \
+ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? \
+ ((*((bfd)->xvec->message)) arglist) : \
+ (bfd_assert (__FILE__,__LINE__), NULL))
+#endif
+#define BFD_SEND_FMT(bfd, message, arglist) \
+ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist)
+
+#ifdef DEBUG_BFD_SEND
+#undef BFD_SEND_FMT
+#define BFD_SEND_FMT(bfd, message, arglist) \
+ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? \
+ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist) : \
+ (bfd_assert (__FILE__,__LINE__), NULL))
+#endif
+
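+/* For illustration, a wrapper such as bfd_sizeof_headers (defined above)
+   dispatches through the target vector, so
+
+     bfd_sizeof_headers (abfd, reloc)
+
+   expands via BFD_SEND to
+
+     (*((abfd)->xvec->_bfd_sizeof_headers)) (abfd, reloc)
+*/
+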
+enum bfd_flavour
+{
+ bfd_target_unknown_flavour,
+ bfd_target_aout_flavour,
+ bfd_target_coff_flavour,
+ bfd_target_ecoff_flavour,
+ bfd_target_xcoff_flavour,
+ bfd_target_elf_flavour,
+ bfd_target_ieee_flavour,
+ bfd_target_nlm_flavour,
+ bfd_target_oasys_flavour,
+ bfd_target_tekhex_flavour,
+ bfd_target_srec_flavour,
+ bfd_target_ihex_flavour,
+ bfd_target_som_flavour,
+ bfd_target_os9k_flavour,
+ bfd_target_versados_flavour,
+ bfd_target_msdos_flavour,
+ bfd_target_ovax_flavour,
+ bfd_target_evax_flavour,
+ bfd_target_mmo_flavour,
+ bfd_target_mach_o_flavour,
+ bfd_target_pef_flavour,
+ bfd_target_pef_xlib_flavour,
+ bfd_target_sym_flavour
+};
+
+enum bfd_endian { BFD_ENDIAN_BIG, BFD_ENDIAN_LITTLE, BFD_ENDIAN_UNKNOWN };
+
+/* Forward declaration. */
+typedef struct bfd_link_info _bfd_link_info;
+
+typedef struct bfd_target
+{
+ /* Identifies the kind of target, e.g., SunOS4, Ultrix, etc. */
+ char *name;
+
+ /* The "flavour" of a back end is a general indication about
+ the contents of a file. */
+ enum bfd_flavour flavour;
+
+ /* The order of bytes within the data area of a file. */
+ enum bfd_endian byteorder;
+
+ /* The order of bytes within the header parts of a file. */
+ enum bfd_endian header_byteorder;
+
+ /* A mask of all the flags which an executable may have set -
+ from the set <<BFD_NO_FLAGS>>, <<HAS_RELOC>>, ...<<D_PAGED>>. */
+ flagword object_flags;
+
+ /* A mask of all the flags which a section may have set - from
+ the set <<SEC_NO_FLAGS>>, <<SEC_ALLOC>>, ...<<SET_NEVER_LOAD>>. */
+ flagword section_flags;
+
+ /* The character normally found at the front of a symbol
+ (if any), perhaps `_'. */
+ char symbol_leading_char;
+
+ /* The pad character for file names within an archive header. */
+ char ar_pad_char;
+
+ /* The maximum number of characters in an archive header. */
+ unsigned short ar_max_namelen;
+
+ /* Entries for byte swapping for data. These are different from the
+ other entry points, since they don't take a BFD as the first argument.
+ Certain other handlers could do the same. */
+ bfd_uint64_t (*bfd_getx64) (const void *);
+ bfd_int64_t (*bfd_getx_signed_64) (const void *);
+ void (*bfd_putx64) (bfd_uint64_t, void *);
+ bfd_vma (*bfd_getx32) (const void *);
+ bfd_signed_vma (*bfd_getx_signed_32) (const void *);
+ void (*bfd_putx32) (bfd_vma, void *);
+ bfd_vma (*bfd_getx16) (const void *);
+ bfd_signed_vma (*bfd_getx_signed_16) (const void *);
+ void (*bfd_putx16) (bfd_vma, void *);
+
+ /* Byte swapping for the headers. */
+ bfd_uint64_t (*bfd_h_getx64) (const void *);
+ bfd_int64_t (*bfd_h_getx_signed_64) (const void *);
+ void (*bfd_h_putx64) (bfd_uint64_t, void *);
+ bfd_vma (*bfd_h_getx32) (const void *);
+ bfd_signed_vma (*bfd_h_getx_signed_32) (const void *);
+ void (*bfd_h_putx32) (bfd_vma, void *);
+ bfd_vma (*bfd_h_getx16) (const void *);
+ bfd_signed_vma (*bfd_h_getx_signed_16) (const void *);
+ void (*bfd_h_putx16) (bfd_vma, void *);
+
+ /* Format dependent routines: these are vectors of entry points
+ within the target vector structure, one for each format to check. */
+
+ /* Check the format of a file being read. Return a <<bfd_target *>> or zero. */
+ const struct bfd_target *(*_bfd_check_format[bfd_type_end]) (bfd *);
+
+ /* Set the format of a file being written. */
+ bfd_boolean (*_bfd_set_format[bfd_type_end]) (bfd *);
+
+ /* Write cached information into a file being written, at <<bfd_close>>. */
+ bfd_boolean (*_bfd_write_contents[bfd_type_end]) (bfd *);
+
+
+ /* Generic entry points. */
+#define BFD_JUMP_TABLE_GENERIC(NAME) \
+ NAME##_close_and_cleanup, \
+ NAME##_bfd_free_cached_info, \
+ NAME##_new_section_hook, \
+ NAME##_get_section_contents, \
+ NAME##_get_section_contents_in_window
+
+ /* Called when the BFD is being closed to do any necessary cleanup. */
+ bfd_boolean (*_close_and_cleanup) (bfd *);
+ /* Ask the BFD to free all cached information. */
+ bfd_boolean (*_bfd_free_cached_info) (bfd *);
+ /* Called when a new section is created. */
+ bfd_boolean (*_new_section_hook) (bfd *, sec_ptr);
+ /* Read the contents of a section. */
+ bfd_boolean (*_bfd_get_section_contents)
+ (bfd *, sec_ptr, void *, file_ptr, bfd_size_type);
+ bfd_boolean (*_bfd_get_section_contents_in_window)
+ (bfd *, sec_ptr, bfd_window *, file_ptr, bfd_size_type);
+
+ /* Entry points to copy private data. */
+#define BFD_JUMP_TABLE_COPY(NAME) \
+ NAME##_bfd_copy_private_bfd_data, \
+ NAME##_bfd_merge_private_bfd_data, \
+ NAME##_bfd_copy_private_section_data, \
+ NAME##_bfd_copy_private_symbol_data, \
+ NAME##_bfd_copy_private_header_data, \
+ NAME##_bfd_set_private_flags, \
+ NAME##_bfd_print_private_bfd_data
+
+ /* Called to copy BFD general private data from one object file
+ to another. */
+ bfd_boolean (*_bfd_copy_private_bfd_data) (bfd *, bfd *);
+ /* Called to merge BFD general private data from one object file
+ to a common output file when linking. */
+ bfd_boolean (*_bfd_merge_private_bfd_data) (bfd *, bfd *);
+ /* Called to copy BFD private section data from one object file
+ to another. */
+ bfd_boolean (*_bfd_copy_private_section_data)
+ (bfd *, sec_ptr, bfd *, sec_ptr);
+ /* Called to copy BFD private symbol data from one symbol
+ to another. */
+ bfd_boolean (*_bfd_copy_private_symbol_data)
+ (bfd *, asymbol *, bfd *, asymbol *);
+ /* Called to copy BFD private header data from one object file
+ to another. */
+ bfd_boolean (*_bfd_copy_private_header_data)
+ (bfd *, bfd *);
+ /* Called to set private backend flags. */
+ bfd_boolean (*_bfd_set_private_flags) (bfd *, flagword);
+
+ /* Called to print private BFD data. */
+ bfd_boolean (*_bfd_print_private_bfd_data) (bfd *, void *);
+
+ /* Core file entry points. */
+#define BFD_JUMP_TABLE_CORE(NAME) \
+ NAME##_core_file_failing_command, \
+ NAME##_core_file_failing_signal, \
+ NAME##_core_file_matches_executable_p
+
+ char * (*_core_file_failing_command) (bfd *);
+ int (*_core_file_failing_signal) (bfd *);
+ bfd_boolean (*_core_file_matches_executable_p) (bfd *, bfd *);
+
+ /* Archive entry points. */
+#define BFD_JUMP_TABLE_ARCHIVE(NAME) \
+ NAME##_slurp_armap, \
+ NAME##_slurp_extended_name_table, \
+ NAME##_construct_extended_name_table, \
+ NAME##_truncate_arname, \
+ NAME##_write_armap, \
+ NAME##_read_ar_hdr, \
+ NAME##_openr_next_archived_file, \
+ NAME##_get_elt_at_index, \
+ NAME##_generic_stat_arch_elt, \
+ NAME##_update_armap_timestamp
+
+ bfd_boolean (*_bfd_slurp_armap) (bfd *);
+ bfd_boolean (*_bfd_slurp_extended_name_table) (bfd *);
+ bfd_boolean (*_bfd_construct_extended_name_table)
+ (bfd *, char **, bfd_size_type *, const char **);
+ void (*_bfd_truncate_arname) (bfd *, const char *, char *);
+ bfd_boolean (*write_armap)
+ (bfd *, unsigned int, struct orl *, unsigned int, int);
+ void * (*_bfd_read_ar_hdr_fn) (bfd *);
+ bfd * (*openr_next_archived_file) (bfd *, bfd *);
+#define bfd_get_elt_at_index(b,i) BFD_SEND (b, _bfd_get_elt_at_index, (b,i))
+ bfd * (*_bfd_get_elt_at_index) (bfd *, symindex);
+ int (*_bfd_stat_arch_elt) (bfd *, struct stat *);
+ bfd_boolean (*_bfd_update_armap_timestamp) (bfd *);
+
+ /* Entry points used for symbols. */
+#define BFD_JUMP_TABLE_SYMBOLS(NAME) \
+ NAME##_get_symtab_upper_bound, \
+ NAME##_canonicalize_symtab, \
+ NAME##_make_empty_symbol, \
+ NAME##_print_symbol, \
+ NAME##_get_symbol_info, \
+ NAME##_bfd_is_local_label_name, \
+ NAME##_bfd_is_target_special_symbol, \
+ NAME##_get_lineno, \
+ NAME##_find_nearest_line, \
+ _bfd_generic_find_line, \
+ NAME##_find_inliner_info, \
+ NAME##_bfd_make_debug_symbol, \
+ NAME##_read_minisymbols, \
+ NAME##_minisymbol_to_symbol
+
+ long (*_bfd_get_symtab_upper_bound) (bfd *);
+ long (*_bfd_canonicalize_symtab)
+ (bfd *, struct bfd_symbol **);
+ struct bfd_symbol *
+ (*_bfd_make_empty_symbol) (bfd *);
+ void (*_bfd_print_symbol)
+ (bfd *, void *, struct bfd_symbol *, bfd_print_symbol_type);
+#define bfd_print_symbol(b,p,s,e) BFD_SEND (b, _bfd_print_symbol, (b,p,s,e))
+ void (*_bfd_get_symbol_info)
+ (bfd *, struct bfd_symbol *, symbol_info *);
+#define bfd_get_symbol_info(b,p,e) BFD_SEND (b, _bfd_get_symbol_info, (b,p,e))
+ bfd_boolean (*_bfd_is_local_label_name) (bfd *, const char *);
+ bfd_boolean (*_bfd_is_target_special_symbol) (bfd *, asymbol *);
+ alent * (*_get_lineno) (bfd *, struct bfd_symbol *);
+ bfd_boolean (*_bfd_find_nearest_line)
+ (bfd *, struct bfd_section *, struct bfd_symbol **, bfd_vma,
+ const char **, const char **, unsigned int *);
+ bfd_boolean (*_bfd_find_line)
+ (bfd *, struct bfd_symbol **, struct bfd_symbol *,
+ const char **, unsigned int *);
+ bfd_boolean (*_bfd_find_inliner_info)
+ (bfd *, const char **, const char **, unsigned int *);
+ /* Back-door to allow format-aware applications to create debug symbols
+ while using BFD for everything else. Currently used by the assembler
+ when creating COFF files. */
+ asymbol * (*_bfd_make_debug_symbol)
+ (bfd *, void *, unsigned long size);
+#define bfd_read_minisymbols(b, d, m, s) \
+ BFD_SEND (b, _read_minisymbols, (b, d, m, s))
+ long (*_read_minisymbols)
+ (bfd *, bfd_boolean, void **, unsigned int *);
+#define bfd_minisymbol_to_symbol(b, d, m, f) \
+ BFD_SEND (b, _minisymbol_to_symbol, (b, d, m, f))
+ asymbol * (*_minisymbol_to_symbol)
+ (bfd *, bfd_boolean, const void *, asymbol *);
+
+ /* Routines for relocs. */
+#define BFD_JUMP_TABLE_RELOCS(NAME) \
+ NAME##_get_reloc_upper_bound, \
+ NAME##_canonicalize_reloc, \
+ NAME##_bfd_reloc_type_lookup
+
+ long (*_get_reloc_upper_bound) (bfd *, sec_ptr);
+ long (*_bfd_canonicalize_reloc)
+ (bfd *, sec_ptr, arelent **, struct bfd_symbol **);
+ /* See documentation on reloc types. */
+ reloc_howto_type *
+ (*reloc_type_lookup) (bfd *, bfd_reloc_code_real_type);
+
+ /* Routines used when writing an object file. */
+#define BFD_JUMP_TABLE_WRITE(NAME) \
+ NAME##_set_arch_mach, \
+ NAME##_set_section_contents
+
+ bfd_boolean (*_bfd_set_arch_mach)
+ (bfd *, enum bfd_architecture, unsigned long);
+ bfd_boolean (*_bfd_set_section_contents)
+ (bfd *, sec_ptr, const void *, file_ptr, bfd_size_type);
+
+ /* Routines used by the linker. */
+#define BFD_JUMP_TABLE_LINK(NAME) \
+ NAME##_sizeof_headers, \
+ NAME##_bfd_get_relocated_section_contents, \
+ NAME##_bfd_relax_section, \
+ NAME##_bfd_link_hash_table_create, \
+ NAME##_bfd_link_hash_table_free, \
+ NAME##_bfd_link_add_symbols, \
+ NAME##_bfd_link_just_syms, \
+ NAME##_bfd_final_link, \
+ NAME##_bfd_link_split_section, \
+ NAME##_bfd_gc_sections, \
+ NAME##_bfd_merge_sections, \
+ NAME##_bfd_is_group_section, \
+ NAME##_bfd_discard_group, \
+ NAME##_section_already_linked \
+
+ int (*_bfd_sizeof_headers) (bfd *, bfd_boolean);
+ bfd_byte * (*_bfd_get_relocated_section_contents)
+ (bfd *, struct bfd_link_info *, struct bfd_link_order *,
+ bfd_byte *, bfd_boolean, struct bfd_symbol **);
+
+ bfd_boolean (*_bfd_relax_section)
+ (bfd *, struct bfd_section *, struct bfd_link_info *, bfd_boolean *);
+
+ /* Create a hash table for the linker. Different backends store
+ different information in this table. */
+ struct bfd_link_hash_table *
+ (*_bfd_link_hash_table_create) (bfd *);
+
+ /* Release the memory associated with the linker hash table. */
+ void (*_bfd_link_hash_table_free) (struct bfd_link_hash_table *);
+
+ /* Add symbols from this object file into the hash table. */
+ bfd_boolean (*_bfd_link_add_symbols) (bfd *, struct bfd_link_info *);
+
+ /* Indicate that we are only retrieving symbol values from this section. */
+ void (*_bfd_link_just_syms) (asection *, struct bfd_link_info *);
+
+ /* Do a link based on the link_order structures attached to each
+ section of the BFD. */
+ bfd_boolean (*_bfd_final_link) (bfd *, struct bfd_link_info *);
+
+ /* Should this section be split up into smaller pieces during linking? */
+ bfd_boolean (*_bfd_link_split_section) (bfd *, struct bfd_section *);
+
+ /* Remove sections that are not referenced from the output. */
+ bfd_boolean (*_bfd_gc_sections) (bfd *, struct bfd_link_info *);
+
+ /* Attempt to merge SEC_MERGE sections. */
+ bfd_boolean (*_bfd_merge_sections) (bfd *, struct bfd_link_info *);
+
+ /* Is this section a member of a group? */
+ bfd_boolean (*_bfd_is_group_section) (bfd *, const struct bfd_section *);
+
+ /* Discard members of a group. */
+ bfd_boolean (*_bfd_discard_group) (bfd *, struct bfd_section *);
+
+ /* Check if SEC has already been linked during a relocatable or
+ final link. */
+ void (*_section_already_linked) (bfd *, struct bfd_section *);
+
+ /* Routines to handle dynamic symbols and relocs. */
+#define BFD_JUMP_TABLE_DYNAMIC(NAME) \
+ NAME##_get_dynamic_symtab_upper_bound, \
+ NAME##_canonicalize_dynamic_symtab, \
+ NAME##_get_synthetic_symtab, \
+ NAME##_get_dynamic_reloc_upper_bound, \
+ NAME##_canonicalize_dynamic_reloc
+
+ /* Get the amount of memory required to hold the dynamic symbols. */
+ long (*_bfd_get_dynamic_symtab_upper_bound) (bfd *);
+ /* Read in the dynamic symbols. */
+ long (*_bfd_canonicalize_dynamic_symtab)
+ (bfd *, struct bfd_symbol **);
+ /* Create synthesized symbols. */
+ long (*_bfd_get_synthetic_symtab)
+ (bfd *, long, struct bfd_symbol **, long, struct bfd_symbol **,
+ struct bfd_symbol **);
+ /* Get the amount of memory required to hold the dynamic relocs. */
+ long (*_bfd_get_dynamic_reloc_upper_bound) (bfd *);
+ /* Read in the dynamic relocs. */
+ long (*_bfd_canonicalize_dynamic_reloc)
+ (bfd *, arelent **, struct bfd_symbol **);
+
+ /* Opposite endian version of this target. */
+ const struct bfd_target * alternative_target;
+
+ /* Data for use by back-end routines, which isn't
+ generic enough to belong in this structure. */
+ const void *backend_data;
+
+} bfd_target;
+
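+/* Illustrative sketch of how a backend fills this vector using the
+   jump-table macros (hypothetical target `foo'; the backends under
+   binutils' bfd/ directory follow this shape):
+
+     const bfd_target foo_vec =
+     {
+       "foo",
+       bfd_target_unknown_flavour,
+       BFD_ENDIAN_LITTLE,
+       ...
+       BFD_JUMP_TABLE_GENERIC (foo),
+       BFD_JUMP_TABLE_COPY (foo),
+       ...
+     };
+
+   Each macro expands to the comma-separated entry points it lists,
+   e.g. foo_close_and_cleanup, foo_bfd_free_cached_info, and so on. */
+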
+bfd_boolean bfd_set_default_target (const char *name);
+
+const bfd_target *bfd_find_target (const char *target_name, bfd *abfd);
+
+const char ** bfd_target_list (void);
+
+const bfd_target *bfd_search_for_target
+ (int (*search_func) (const bfd_target *, void *),
+ void *);
+
+/* Extracted from format.c. */
+bfd_boolean bfd_check_format (bfd *abfd, bfd_format format);
+
+bfd_boolean bfd_check_format_matches
+ (bfd *abfd, bfd_format format, char ***matching);
+
+bfd_boolean bfd_set_format (bfd *abfd, bfd_format format);
+
+const char *bfd_format_string (bfd_format format);
+
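+/* Canonical open-and-probe sketch (illustrative only; the file name is
+   arbitrary): open a file, then ask BFD to verify it matches a given
+   format before using it.
+
+     bfd *abfd = bfd_openr ("a.out", NULL);
+     if (abfd != NULL && bfd_check_format (abfd, bfd_object))
+       printf ("%s\n", bfd_format_string (bfd_get_format (abfd)));
+*/
+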
+/* Extracted from linker.c. */
+bfd_boolean bfd_link_split_section (bfd *abfd, asection *sec);
+
+#define bfd_link_split_section(abfd, sec) \
+ BFD_SEND (abfd, _bfd_link_split_section, (abfd, sec))
+
+void bfd_section_already_linked (bfd *abfd, asection *sec);
+
+#define bfd_section_already_linked(abfd, sec) \
+ BFD_SEND (abfd, _section_already_linked, (abfd, sec))
+
+/* Extracted from simple.c. */
+bfd_byte *bfd_simple_get_relocated_section_contents
+ (bfd *abfd, asection *sec, bfd_byte *outbuf, asymbol **symbol_table);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
--- /dev/null
+++ b/arch/x86/include/asm/bfd_64.h
@@ -0,0 +1,4917 @@
+/* DO NOT EDIT! -*- buffer-read-only: t -*- This file is automatically
+ generated from "bfd-in.h", "init.c", "opncls.c", "libbfd.c",
+ "bfdio.c", "bfdwin.c", "section.c", "archures.c", "reloc.c",
+ "syms.c", "bfd.c", "archive.c", "corefile.c", "targets.c", "format.c",
+ "linker.c" and "simple.c".
+ Run "make headers" in your build bfd/ to regenerate. */
+
+/* Main header file for the bfd library -- portable access to object files.
+
+ Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+
+ Contributed by Cygnus Support.
+
+ This file is part of BFD, the Binary File Descriptor library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use.
+ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as
+ * required.
+ * Keith Owens <kaos@sgi.com> 15 May 2006
+ */
+
+#ifndef __BFD_H_SEEN__
+#define __BFD_H_SEEN__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __KERNEL__
+#include <asm/ansidecl.h>
+#else /* __KERNEL__ */
+#include "ansidecl.h"
+#include "symcat.h"
+#endif /* __KERNEL__ */
+#if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE)
+#ifndef SABER
+/* This hack is to avoid a problem with some strict ANSI C preprocessors.
+ The problem is, "32_" is not a valid preprocessing token, and we don't
+ want extra underscores (e.g., "nlm_32_"). The XCONCAT2 macro will
+ cause the inner CONCAT2 macros to be evaluated first, producing
+ still-valid pp-tokens. Then the final concatenation can be done. */
+#undef CONCAT4
+#define CONCAT4(a,b,c,d) XCONCAT2(CONCAT2(a,b),CONCAT2(c,d))
+#endif
+#endif
+
+/* The word size used by BFD on the host. This may be 64 with a 32
+ bit target if the host is 64 bit, or if other 64 bit targets have
+ been selected with --enable-targets, or if --enable-64-bit-bfd. */
+#define BFD_ARCH_SIZE 64
+
+/* The word size of the default bfd target. */
+#define BFD_DEFAULT_TARGET_SIZE 64
+
+#define BFD_HOST_64BIT_LONG 1
+#define BFD_HOST_LONG_LONG 1
+#if 1
+#define BFD_HOST_64_BIT long
+#define BFD_HOST_U_64_BIT unsigned long
+typedef BFD_HOST_64_BIT bfd_int64_t;
+typedef BFD_HOST_U_64_BIT bfd_uint64_t;
+#endif
+
+#if BFD_ARCH_SIZE >= 64
+#define BFD64
+#endif
+
+#ifndef INLINE
+#if __GNUC__ >= 2
+#define INLINE __inline__
+#else
+#define INLINE
+#endif
+#endif
+
+/* Forward declaration. */
+typedef struct bfd bfd;
+
+/* Boolean type used in bfd. Too many systems define their own
+ versions of "boolean" for us to safely typedef a "boolean" of
+ our own. Using an enum for "bfd_boolean" has its own set of
+ problems, with strange looking casts required to avoid warnings
+ on some older compilers. Thus we just use an int.
+
+ General rule: Functions which are bfd_boolean return TRUE on
+ success and FALSE on failure (unless they're a predicate). */
+
+typedef int bfd_boolean;
+#undef FALSE
+#undef TRUE
+#define FALSE 0
+#define TRUE 1
+
+#ifdef BFD64
+
+#ifndef BFD_HOST_64_BIT
+ #error No 64 bit integer type available
+#endif /* ! defined (BFD_HOST_64_BIT) */
+
+typedef BFD_HOST_U_64_BIT bfd_vma;
+typedef BFD_HOST_64_BIT bfd_signed_vma;
+typedef BFD_HOST_U_64_BIT bfd_size_type;
+typedef BFD_HOST_U_64_BIT symvalue;
+
+#ifndef fprintf_vma
+#if BFD_HOST_64BIT_LONG
+#define sprintf_vma(s,x) sprintf (s, "%016lx", x)
+#define fprintf_vma(f,x) fprintf (f, "%016lx", x)
+#else
+#define _bfd_int64_low(x) ((unsigned long) (((x) & 0xffffffff)))
+#define _bfd_int64_high(x) ((unsigned long) (((x) >> 32) & 0xffffffff))
+#define fprintf_vma(s,x) \
+ fprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x))
+#define sprintf_vma(s,x) \
+ sprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x))
+#endif
+#endif
+
+#else /* not BFD64 */
+
+/* Represent a target address. Also used as a generic unsigned type
+ which is guaranteed to be big enough to hold any arithmetic types
+ we need to deal with. */
+typedef unsigned long bfd_vma;
+
+/* A generic signed type which is guaranteed to be big enough to hold any
+ arithmetic types we need to deal with. Can be assumed to be compatible
+ with bfd_vma in the same way that signed and unsigned ints are compatible
+ (as parameters, in assignment, etc). */
+typedef long bfd_signed_vma;
+
+typedef unsigned long symvalue;
+typedef unsigned long bfd_size_type;
+
+/* Print a bfd_vma x on stream s. */
+#define fprintf_vma(s,x) fprintf (s, "%08lx", x)
+#define sprintf_vma(s,x) sprintf (s, "%08lx", x)
+
+#endif /* not BFD64 */
+
+#define HALF_BFD_SIZE_TYPE \
+ (((bfd_size_type) 1) << (8 * sizeof (bfd_size_type) / 2))
+
+#ifndef BFD_HOST_64_BIT
+/* Fall back on a 32 bit type. The idea is to make these types always
+ available for function return types, but in the case that
+ BFD_HOST_64_BIT is undefined such a function should abort or
+ otherwise signal an error. */
+typedef bfd_signed_vma bfd_int64_t;
+typedef bfd_vma bfd_uint64_t;
+#endif
+
+/* An offset into a file. BFD always uses the largest possible offset
+ based on the build time availability of fseek, fseeko, or fseeko64. */
+typedef BFD_HOST_64_BIT file_ptr;
+typedef unsigned BFD_HOST_64_BIT ufile_ptr;
+
+extern void bfd_sprintf_vma (bfd *, char *, bfd_vma);
+extern void bfd_fprintf_vma (bfd *, void *, bfd_vma);
+
+#define printf_vma(x) fprintf_vma(stdout,x)
+#define bfd_printf_vma(abfd,x) bfd_fprintf_vma (abfd,stdout,x)
+
+typedef unsigned int flagword; /* 32 bits of flags */
+typedef unsigned char bfd_byte;
+
+/* File formats. */
+
+typedef enum bfd_format
+{
+ bfd_unknown = 0, /* File format is unknown. */
+ bfd_object, /* Linker/assembler/compiler output. */
+ bfd_archive, /* Object archive file. */
+ bfd_core, /* Core dump. */
+ bfd_type_end /* Marks the end; don't use it! */
+}
+bfd_format;
+
+/* Values that may appear in the flags field of a BFD. These also
+ appear in the object_flags field of the bfd_target structure, where
+ they indicate the set of flags used by that backend (not all flags
+ are meaningful for all object file formats) (FIXME: at the moment,
+ the object_flags values have mostly just been copied from one backend
+ to another, and are not necessarily correct). */
+
+/* No flags. */
+#define BFD_NO_FLAGS 0x00
+
+/* BFD contains relocation entries. */
+#define HAS_RELOC 0x01
+
+/* BFD is directly executable. */
+#define EXEC_P 0x02
+
+/* BFD has line number information (basically used for F_LNNO in a
+ COFF header). */
+#define HAS_LINENO 0x04
+
+/* BFD has debugging information. */
+#define HAS_DEBUG 0x08
+
+/* BFD has symbols. */
+#define HAS_SYMS 0x10
+
+/* BFD has local symbols (basically used for F_LSYMS in a COFF
+ header). */
+#define HAS_LOCALS 0x20
+
+/* BFD is a dynamic object. */
+#define DYNAMIC 0x40
+
+/* Text section is write protected (if D_PAGED is not set, this is
+ like an a.out NMAGIC file) (the linker sets this by default, but
+ clears it for -r or -N). */
+#define WP_TEXT 0x80
+
+/* BFD is dynamically paged (this is like an a.out ZMAGIC file) (the
+ linker sets this by default, but clears it for -r or -n or -N). */
+#define D_PAGED 0x100
+
+/* BFD is relaxable (this means that bfd_relax_section may be able to
+ do something) (sometimes bfd_relax_section can do something even if
+ this is not set). */
+#define BFD_IS_RELAXABLE 0x200
+
+/* This may be set before writing out a BFD to request using a
+ traditional format. For example, this is used to request that when
+ writing out an a.out object the symbols not be hashed to eliminate
+ duplicates. */
+#define BFD_TRADITIONAL_FORMAT 0x400
+
+/* This flag indicates that the BFD contents are actually cached in
+ memory. If this is set, iostream points to a bfd_in_memory struct. */
+#define BFD_IN_MEMORY 0x800
+
+/* The sections in this BFD specify a memory page. */
+#define HAS_LOAD_PAGE 0x1000
+
+/* This BFD has been created by the linker and doesn't correspond
+ to any input file. */
+#define BFD_LINKER_CREATED 0x2000
+
+/* Symbols and relocation. */
+
+/* A count of carsyms (canonical archive symbols). */
+typedef unsigned long symindex;
+
+/* How to perform a relocation. */
+typedef const struct reloc_howto_struct reloc_howto_type;
+
+#define BFD_NO_MORE_SYMBOLS ((symindex) ~0)
+
+/* General purpose part of a symbol X;
+ target specific parts are in libcoff.h, libaout.h, etc. */
+
+#define bfd_get_section(x) ((x)->section)
+#define bfd_get_output_section(x) ((x)->section->output_section)
+#define bfd_set_section(x,y) ((x)->section) = (y)
+#define bfd_asymbol_base(x) ((x)->section->vma)
+#define bfd_asymbol_value(x) (bfd_asymbol_base(x) + (x)->value)
+#define bfd_asymbol_name(x) ((x)->name)
+/*Perhaps future: #define bfd_asymbol_bfd(x) ((x)->section->owner)*/
+#define bfd_asymbol_bfd(x) ((x)->the_bfd)
+#define bfd_asymbol_flavour(x) (bfd_asymbol_bfd(x)->xvec->flavour)
+
+/* A canonical archive symbol. */
+/* This is a type pun with struct ranlib on purpose! */
+typedef struct carsym
+{
+ char *name;
+ file_ptr file_offset; /* Look here to find the file. */
+}
+carsym; /* To make these you call a carsymogen. */
+
+/* Used in generating armaps (archive tables of contents).
+ Perhaps just a forward definition would do? */
+struct orl /* Output ranlib. */
+{
+ char **name; /* Symbol name. */
+ union
+ {
+ file_ptr pos;
+ bfd *abfd;
+ } u; /* bfd* or file position. */
+ int namidx; /* Index into string table. */
+};
+
+/* Linenumber stuff. */
+typedef struct lineno_cache_entry
+{
+ unsigned int line_number; /* Linenumber from start of function. */
+ union
+ {
+ struct bfd_symbol *sym; /* Function name. */
+ bfd_vma offset; /* Offset into section. */
+ } u;
+}
+alent;
+
+/* Object and core file sections. */
+
+#define align_power(addr, align) \
+ (((addr) + ((bfd_vma) 1 << (align)) - 1) & ((bfd_vma) -1 << (align)))
+
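+/* Worked example: align_power (0x1234, 4) rounds 0x1234 up to the next
+   16-byte (1 << 4) boundary, giving 0x1240; an already-aligned address
+   is returned unchanged. */
+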
+typedef struct bfd_section *sec_ptr;
+
+#define bfd_get_section_name(bfd, ptr) ((ptr)->name + 0)
+#define bfd_get_section_vma(bfd, ptr) ((ptr)->vma + 0)
+#define bfd_get_section_lma(bfd, ptr) ((ptr)->lma + 0)
+#define bfd_get_section_alignment(bfd, ptr) ((ptr)->alignment_power + 0)
+#define bfd_section_name(bfd, ptr) ((ptr)->name)
+#define bfd_section_size(bfd, ptr) ((ptr)->size)
+#define bfd_get_section_size(ptr) ((ptr)->size)
+#define bfd_section_vma(bfd, ptr) ((ptr)->vma)
+#define bfd_section_lma(bfd, ptr) ((ptr)->lma)
+#define bfd_section_alignment(bfd, ptr) ((ptr)->alignment_power)
+#define bfd_get_section_flags(bfd, ptr) ((ptr)->flags + 0)
+#define bfd_get_section_userdata(bfd, ptr) ((ptr)->userdata)
+
+#define bfd_is_com_section(ptr) (((ptr)->flags & SEC_IS_COMMON) != 0)
+
+#define bfd_set_section_vma(bfd, ptr, val) (((ptr)->vma = (ptr)->lma = (val)), ((ptr)->user_set_vma = TRUE), TRUE)
+#define bfd_set_section_alignment(bfd, ptr, val) (((ptr)->alignment_power = (val)),TRUE)
+#define bfd_set_section_userdata(bfd, ptr, val) (((ptr)->userdata = (val)),TRUE)
+/* Find the address one past the end of SEC. */
+#define bfd_get_section_limit(bfd, sec) \
+ (((sec)->rawsize ? (sec)->rawsize : (sec)->size) \
+ / bfd_octets_per_byte (bfd))
+
+typedef struct stat stat_type;
+
+typedef enum bfd_print_symbol
+{
+ bfd_print_symbol_name,
+ bfd_print_symbol_more,
+ bfd_print_symbol_all
+} bfd_print_symbol_type;
+
+/* Information about a symbol that nm needs. */
+
+typedef struct _symbol_info
+{
+ symvalue value;
+ char type;
+ const char *name; /* Symbol name. */
+ unsigned char stab_type; /* Stab type. */
+ char stab_other; /* Stab other. */
+ short stab_desc; /* Stab desc. */
+ const char *stab_name; /* String for stab type. */
+} symbol_info;
+
+/* Get the name of a stabs type code. */
+
+extern const char *bfd_get_stab_name (int);
+
+/* Hash table routines. */
+
+/* An element in the hash table. Most uses will actually use a larger
+ structure, and an instance of this will be the first field. */
+
+struct bfd_hash_entry
+{
+ /* Next entry for this hash code. */
+ struct bfd_hash_entry *next;
+ /* String being hashed. */
+ const char *string;
+ /* Hash code. This is the full hash code, not the index into the
+ table. */
+ unsigned long hash;
+};
+
+/* A hash table. */
+
+struct bfd_hash_table
+{
+ /* The hash array. */
+ struct bfd_hash_entry **table;
+ /* The number of slots in the hash table. */
+ unsigned int size;
+ /* A function used to create new elements in the hash table. The
+ first entry is itself a pointer to an element. When this
+ function is first invoked, this pointer will be NULL. However,
+ having the pointer permits a hierarchy of method functions to be
+ built each of which calls the function in the superclass. Thus
+ each function should be written to allocate a new block of memory
+ only if the argument is NULL. */
+ struct bfd_hash_entry *(*newfunc)
+ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *);
+ /* An objalloc for this hash table. This is a struct objalloc *,
+ but we use void * to avoid requiring the inclusion of objalloc.h. */
+ void *memory;
+};
+
+/* Initialize a hash table. */
+extern bfd_boolean bfd_hash_table_init
+ (struct bfd_hash_table *,
+ struct bfd_hash_entry *(*) (struct bfd_hash_entry *,
+ struct bfd_hash_table *,
+ const char *));
+
+/* Initialize a hash table specifying a size. */
+extern bfd_boolean bfd_hash_table_init_n
+ (struct bfd_hash_table *,
+ struct bfd_hash_entry *(*) (struct bfd_hash_entry *,
+ struct bfd_hash_table *,
+ const char *),
+ unsigned int size);
+
+/* Free up a hash table. */
+extern void bfd_hash_table_free
+ (struct bfd_hash_table *);
+
+/* Look up a string in a hash table. If CREATE is TRUE, a new entry
+ will be created for this string if one does not already exist. The
+ COPY argument must be TRUE if this routine should copy the string
+ into newly allocated memory when adding an entry. */
+extern struct bfd_hash_entry *bfd_hash_lookup
+ (struct bfd_hash_table *, const char *, bfd_boolean create,
+ bfd_boolean copy);
+
+/* Replace an entry in a hash table. */
+extern void bfd_hash_replace
+ (struct bfd_hash_table *, struct bfd_hash_entry *old,
+ struct bfd_hash_entry *nw);
+
+/* Base method for creating a hash table entry. */
+extern struct bfd_hash_entry *bfd_hash_newfunc
+ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *);
+
+/* Grab some space for a hash table entry. */
+extern void *bfd_hash_allocate
+ (struct bfd_hash_table *, unsigned int);
+
+/* Traverse a hash table in a random order, calling a function on each
+ element. If the function returns FALSE, the traversal stops. The
+ INFO argument is passed to the function. */
+extern void bfd_hash_traverse
+ (struct bfd_hash_table *,
+ bfd_boolean (*) (struct bfd_hash_entry *, void *),
+ void *info);
+
+/* Allows the default size of a hash table to be configured. New hash
+ tables allocated using bfd_hash_table_init will be created with
+ this size. */
+extern void bfd_hash_set_default_size (bfd_size_type);
+
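+/* Sketch of the subclassing pattern described above (hypothetical entry
+   type `foo'; illustrative only). The derived entry embeds
+   struct bfd_hash_entry as its first field, and its newfunc allocates
+   the larger structure before chaining to the base method:
+
+     struct foo_hash_entry
+     {
+       struct bfd_hash_entry root;
+       int extra;
+     };
+
+     static struct bfd_hash_entry *
+     foo_newfunc (struct bfd_hash_entry *entry,
+                  struct bfd_hash_table *table, const char *string)
+     {
+       if (entry == NULL)
+         entry = bfd_hash_allocate (table,
+                                    sizeof (struct foo_hash_entry));
+       if (entry == NULL)
+         return NULL;
+       entry = bfd_hash_newfunc (entry, table, string);
+       if (entry != NULL)
+         ((struct foo_hash_entry *) entry)->extra = 0;
+       return entry;
+     }
+*/
+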
+/* This structure is used to keep track of stabs in sections
+ information while linking. */
+
+struct stab_info
+{
+ /* A hash table used to hold stabs strings. */
+ struct bfd_strtab_hash *strings;
+ /* The header file hash table. */
+ struct bfd_hash_table includes;
+ /* The first .stabstr section. */
+ struct bfd_section *stabstr;
+};
+
+#define COFF_SWAP_TABLE (void *) &bfd_coff_std_swap_table
+
+/* User program access to BFD facilities. */
+
+/* Direct I/O routines, for programs which know more about the object
+ file than BFD does. Use higher level routines if possible. */
+
+extern bfd_size_type bfd_bread (void *, bfd_size_type, bfd *);
+extern bfd_size_type bfd_bwrite (const void *, bfd_size_type, bfd *);
+extern int bfd_seek (bfd *, file_ptr, int);
+extern file_ptr bfd_tell (bfd *);
+extern int bfd_flush (bfd *);
+extern int bfd_stat (bfd *, struct stat *);
+
+/* Deprecated old routines. */
+#if __GNUC__
+#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \
+ (warn_deprecated ("bfd_read", __FILE__, __LINE__, __FUNCTION__), \
+ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
+#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \
+ (warn_deprecated ("bfd_write", __FILE__, __LINE__, __FUNCTION__), \
+ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
+#else
+#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \
+ (warn_deprecated ("bfd_read", (const char *) 0, 0, (const char *) 0), \
+ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
+#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \
+ (warn_deprecated ("bfd_write", (const char *) 0, 0, (const char *) 0),\
+ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
+#endif
+extern void warn_deprecated (const char *, const char *, int, const char *);
+
+/* Cast from const char * to char * so that caller can assign to
+ a char * without a warning. */
+#define bfd_get_filename(abfd) ((char *) (abfd)->filename)
+#define bfd_get_cacheable(abfd) ((abfd)->cacheable)
+#define bfd_get_format(abfd) ((abfd)->format)
+#define bfd_get_target(abfd) ((abfd)->xvec->name)
+#define bfd_get_flavour(abfd) ((abfd)->xvec->flavour)
+#define bfd_family_coff(abfd) \
+ (bfd_get_flavour (abfd) == bfd_target_coff_flavour || \
+ bfd_get_flavour (abfd) == bfd_target_xcoff_flavour)
+#define bfd_big_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG)
+#define bfd_little_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_LITTLE)
+#define bfd_header_big_endian(abfd) \
+ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_BIG)
+#define bfd_header_little_endian(abfd) \
+ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_LITTLE)
+#define bfd_get_file_flags(abfd) ((abfd)->flags)
+#define bfd_applicable_file_flags(abfd) ((abfd)->xvec->object_flags)
+#define bfd_applicable_section_flags(abfd) ((abfd)->xvec->section_flags)
+#define bfd_my_archive(abfd) ((abfd)->my_archive)
+#define bfd_has_map(abfd) ((abfd)->has_armap)
+
+#define bfd_valid_reloc_types(abfd) ((abfd)->xvec->valid_reloc_types)
+#define bfd_usrdata(abfd) ((abfd)->usrdata)
+
+#define bfd_get_start_address(abfd) ((abfd)->start_address)
+#define bfd_get_symcount(abfd) ((abfd)->symcount)
+#define bfd_get_outsymbols(abfd) ((abfd)->outsymbols)
+#define bfd_count_sections(abfd) ((abfd)->section_count)
+
+#define bfd_get_dynamic_symcount(abfd) ((abfd)->dynsymcount)
+
+#define bfd_get_symbol_leading_char(abfd) ((abfd)->xvec->symbol_leading_char)
+
+#define bfd_set_cacheable(abfd,bool) (((abfd)->cacheable = bool), TRUE)
+
+extern bfd_boolean bfd_cache_close
+ (bfd *abfd);
+/* NB: This declaration should match the autogenerated one in libbfd.h. */
+
+extern bfd_boolean bfd_cache_close_all (void);
+
+extern bfd_boolean bfd_record_phdr
+ (bfd *, unsigned long, bfd_boolean, flagword, bfd_boolean, bfd_vma,
+ bfd_boolean, bfd_boolean, unsigned int, struct bfd_section **);
+
+/* Byte swapping routines. */
+
+bfd_uint64_t bfd_getb64 (const void *);
+bfd_uint64_t bfd_getl64 (const void *);
+bfd_int64_t bfd_getb_signed_64 (const void *);
+bfd_int64_t bfd_getl_signed_64 (const void *);
+bfd_vma bfd_getb32 (const void *);
+bfd_vma bfd_getl32 (const void *);
+bfd_signed_vma bfd_getb_signed_32 (const void *);
+bfd_signed_vma bfd_getl_signed_32 (const void *);
+bfd_vma bfd_getb16 (const void *);
+bfd_vma bfd_getl16 (const void *);
+bfd_signed_vma bfd_getb_signed_16 (const void *);
+bfd_signed_vma bfd_getl_signed_16 (const void *);
+void bfd_putb64 (bfd_uint64_t, void *);
+void bfd_putl64 (bfd_uint64_t, void *);
+void bfd_putb32 (bfd_vma, void *);
+void bfd_putl32 (bfd_vma, void *);
+void bfd_putb16 (bfd_vma, void *);
+void bfd_putl16 (bfd_vma, void *);
+
+/* Byte swapping routines which take size and endianness as arguments. */
+
+bfd_uint64_t bfd_get_bits (const void *, int, bfd_boolean);
+void bfd_put_bits (bfd_uint64_t, void *, int, bfd_boolean);
+
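+/* Example: given unsigned char buf[4] = { 0x12, 0x34, 0x56, 0x78 },
+   bfd_getb32 (buf) yields 0x12345678 and bfd_getl32 (buf) yields
+   0x78563412; bfd_get_bits (buf, 32, TRUE) gives the big-endian
+   result. */
+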
+extern bfd_boolean bfd_section_already_linked_table_init (void);
+extern void bfd_section_already_linked_table_free (void);
+
+/* Externally visible ECOFF routines. */
+
+#if defined(__STDC__) || defined(ALMOST_STDC)
+struct ecoff_debug_info;
+struct ecoff_debug_swap;
+struct ecoff_extr;
+struct bfd_symbol;
+struct bfd_link_info;
+struct bfd_link_hash_entry;
+struct bfd_elf_version_tree;
+#endif
+extern bfd_vma bfd_ecoff_get_gp_value
+ (bfd * abfd);
+extern bfd_boolean bfd_ecoff_set_gp_value
+ (bfd *abfd, bfd_vma gp_value);
+extern bfd_boolean bfd_ecoff_set_regmasks
+ (bfd *abfd, unsigned long gprmask, unsigned long fprmask,
+ unsigned long *cprmask);
+extern void *bfd_ecoff_debug_init
+ (bfd *output_bfd, struct ecoff_debug_info *output_debug,
+ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *);
+extern void bfd_ecoff_debug_free
+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
+ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *);
+extern bfd_boolean bfd_ecoff_debug_accumulate
+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
+ const struct ecoff_debug_swap *output_swap, bfd *input_bfd,
+ struct ecoff_debug_info *input_debug,
+ const struct ecoff_debug_swap *input_swap, struct bfd_link_info *);
+extern bfd_boolean bfd_ecoff_debug_accumulate_other
+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
+ const struct ecoff_debug_swap *output_swap, bfd *input_bfd,
+ struct bfd_link_info *);
+extern bfd_boolean bfd_ecoff_debug_externals
+ (bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap, bfd_boolean relocatable,
+ bfd_boolean (*get_extr) (struct bfd_symbol *, struct ecoff_extr *),
+ void (*set_index) (struct bfd_symbol *, bfd_size_type));
+extern bfd_boolean bfd_ecoff_debug_one_external
+ (bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap, const char *name,
+ struct ecoff_extr *esym);
+extern bfd_size_type bfd_ecoff_debug_size
+ (bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap);
+extern bfd_boolean bfd_ecoff_write_debug
+ (bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap, file_ptr where);
+extern bfd_boolean bfd_ecoff_write_accumulated_debug
+ (void *handle, bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap,
+ struct bfd_link_info *info, file_ptr where);
+
+/* Externally visible ELF routines. */
+
+struct bfd_link_needed_list
+{
+ struct bfd_link_needed_list *next;
+ bfd *by;
+ const char *name;
+};
+
+enum dynamic_lib_link_class {
+ DYN_NORMAL = 0,
+ DYN_AS_NEEDED = 1,
+ DYN_DT_NEEDED = 2,
+ DYN_NO_ADD_NEEDED = 4,
+ DYN_NO_NEEDED = 8
+};
+
+extern bfd_boolean bfd_elf_record_link_assignment
+ (struct bfd_link_info *, const char *, bfd_boolean);
+extern struct bfd_link_needed_list *bfd_elf_get_needed_list
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_elf_get_bfd_needed_list
+ (bfd *, struct bfd_link_needed_list **);
+extern bfd_boolean bfd_elf_size_dynamic_sections
+ (bfd *, const char *, const char *, const char *, const char * const *,
+ struct bfd_link_info *, struct bfd_section **,
+ struct bfd_elf_version_tree *);
+extern bfd_boolean bfd_elf_size_dynsym_hash_dynstr
+ (bfd *, struct bfd_link_info *);
+extern void bfd_elf_set_dt_needed_name
+ (bfd *, const char *);
+extern const char *bfd_elf_get_dt_soname
+ (bfd *);
+extern void bfd_elf_set_dyn_lib_class
+ (bfd *, int);
+extern int bfd_elf_get_dyn_lib_class
+ (bfd *);
+extern struct bfd_link_needed_list *bfd_elf_get_runpath_list
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_elf_discard_info
+ (bfd *, struct bfd_link_info *);
+extern unsigned int _bfd_elf_default_action_discarded
+ (struct bfd_section *);
+
+/* Return an upper bound on the number of bytes required to store a
+ copy of ABFD's program header table entries. Return -1 if an error
+ occurs; bfd_get_error will return an appropriate code. */
+extern long bfd_get_elf_phdr_upper_bound
+ (bfd *abfd);
+
+/* Copy ABFD's program header table entries to *PHDRS. The entries
+ will be stored as an array of Elf_Internal_Phdr structures, as
+ defined in include/elf/internal.h. To find out how large the
+ buffer needs to be, call bfd_get_elf_phdr_upper_bound.
+
+ Return the number of program header table entries read, or -1 if an
+ error occurs; bfd_get_error will return an appropriate code. */
+extern int bfd_get_elf_phdrs
+ (bfd *abfd, void *phdrs);
+
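+/* Usage sketch combining the two calls above (illustrative only; malloc
+   stands in for the caller's allocator):
+
+     long bound = bfd_get_elf_phdr_upper_bound (abfd);
+     if (bound > 0)
+       {
+         void *phdrs = malloc (bound);
+         int n = bfd_get_elf_phdrs (abfd, phdrs);
+         if (n < 0)
+           fprintf (stderr, "%s\n", bfd_errmsg (bfd_get_error ()));
+       }
+*/
+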
+/* Create a new BFD as if by bfd_openr. Rather than opening a file,
+ reconstruct an ELF file by reading the segments out of remote memory
+ based on the ELF file header at EHDR_VMA and the ELF program headers it
+ points to. If not null, *LOADBASEP is filled in with the difference
+ between the VMAs from which the segments were read, and the VMAs the
+ file headers (and hence BFD's idea of each section's VMA) put them at.
+
+ The function TARGET_READ_MEMORY is called to copy LEN bytes from the
+ remote memory at target address VMA into the local buffer at MYADDR; it
+ should return zero on success or an `errno' code on failure. TEMPL must
+ be a BFD for an ELF target with the word size and byte order found in
+ the remote memory. */
+extern bfd *bfd_elf_bfd_from_remote_memory
+ (bfd *templ, bfd_vma ehdr_vma, bfd_vma *loadbasep,
+ int (*target_read_memory) (bfd_vma vma, bfd_byte *myaddr, int len));
+
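+/* The reader callback is a plain copy routine. A sketch for a client
+   that already holds the whole image in a local buffer (hypothetical
+   `image' and `image_base'; returns zero on success as required above):
+
+     static int
+     read_remote (bfd_vma vma, bfd_byte *myaddr, int len)
+     {
+       memcpy (myaddr, image + (vma - image_base), len);
+       return 0;
+     }
+*/
+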
+/* Return the arch_size field of an elf bfd, or -1 if not elf. */
+extern int bfd_get_arch_size
+ (bfd *);
+
+/* Return TRUE if address "naturally" sign extends, or -1 if not elf. */
+extern int bfd_get_sign_extend_vma
+ (bfd *);
+
+extern struct bfd_section *_bfd_elf_tls_setup
+ (bfd *, struct bfd_link_info *);
+
+extern void _bfd_elf_provide_symbol
+ (struct bfd_link_info *, const char *, bfd_vma, struct bfd_section *);
+
+extern void _bfd_elf_provide_section_bound_symbols
+ (struct bfd_link_info *, struct bfd_section *, const char *, const char *);
+
+extern void _bfd_elf_fix_excluded_sec_syms
+ (bfd *, struct bfd_link_info *);
+
+extern bfd_boolean bfd_m68k_elf32_create_embedded_relocs
+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *,
+ char **);
+
+/* SunOS shared library support routines for the linker. */
+
+extern struct bfd_link_needed_list *bfd_sunos_get_needed_list
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_sunos_record_link_assignment
+ (bfd *, struct bfd_link_info *, const char *);
+extern bfd_boolean bfd_sunos_size_dynamic_sections
+ (bfd *, struct bfd_link_info *, struct bfd_section **,
+ struct bfd_section **, struct bfd_section **);
+
+/* Linux shared library support routines for the linker. */
+
+extern bfd_boolean bfd_i386linux_size_dynamic_sections
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_m68klinux_size_dynamic_sections
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_sparclinux_size_dynamic_sections
+ (bfd *, struct bfd_link_info *);
+
+/* mmap hacks */
+
+struct _bfd_window_internal;
+typedef struct _bfd_window_internal bfd_window_internal;
+
+typedef struct _bfd_window
+{
+ /* What the user asked for. */
+ void *data;
+ bfd_size_type size;
+ /* The actual window used by BFD. Small user-requested read-only
+ regions sharing a page may share a single window into the object
+ file. Read-write versions shouldn't until I've fixed things to
+ keep track of which portions have been claimed by the
+ application; don't want to give the same region back when the
+ application wants two writable copies! */
+ struct _bfd_window_internal *i;
+}
+bfd_window;
+
+extern void bfd_init_window
+ (bfd_window *);
+extern void bfd_free_window
+ (bfd_window *);
+extern bfd_boolean bfd_get_file_window
+ (bfd *, file_ptr, bfd_size_type, bfd_window *, bfd_boolean);
+
+/* XCOFF support routines for the linker. */
+
+extern bfd_boolean bfd_xcoff_link_record_set
+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_size_type);
+extern bfd_boolean bfd_xcoff_import_symbol
+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_vma,
+ const char *, const char *, const char *, unsigned int);
+extern bfd_boolean bfd_xcoff_export_symbol
+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *);
+extern bfd_boolean bfd_xcoff_link_count_reloc
+ (bfd *, struct bfd_link_info *, const char *);
+extern bfd_boolean bfd_xcoff_record_link_assignment
+ (bfd *, struct bfd_link_info *, const char *);
+extern bfd_boolean bfd_xcoff_size_dynamic_sections
+ (bfd *, struct bfd_link_info *, const char *, const char *,
+ unsigned long, unsigned long, unsigned long, bfd_boolean,
+ int, bfd_boolean, bfd_boolean, struct bfd_section **, bfd_boolean);
+extern bfd_boolean bfd_xcoff_link_generate_rtinit
+ (bfd *, const char *, const char *, bfd_boolean);
+
+/* XCOFF support routines for ar. */
+extern bfd_boolean bfd_xcoff_ar_archive_set_magic
+ (bfd *, char *);
+
+/* Externally visible COFF routines. */
+
+#if defined(__STDC__) || defined(ALMOST_STDC)
+struct internal_syment;
+union internal_auxent;
+#endif
+
+extern bfd_boolean bfd_coff_get_syment
+ (bfd *, struct bfd_symbol *, struct internal_syment *);
+
+extern bfd_boolean bfd_coff_get_auxent
+ (bfd *, struct bfd_symbol *, int, union internal_auxent *);
+
+extern bfd_boolean bfd_coff_set_symbol_class
+ (bfd *, struct bfd_symbol *, unsigned int);
+
+extern bfd_boolean bfd_m68k_coff_create_embedded_relocs
+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *, char **);
+
+/* ARM Interworking support. Called from linker. */
+extern bfd_boolean bfd_arm_allocate_interworking_sections
+ (struct bfd_link_info *);
+
+extern bfd_boolean bfd_arm_process_before_allocation
+ (bfd *, struct bfd_link_info *, int);
+
+extern bfd_boolean bfd_arm_get_bfd_for_interworking
+ (bfd *, struct bfd_link_info *);
+
+/* PE ARM Interworking support. Called from linker. */
+extern bfd_boolean bfd_arm_pe_allocate_interworking_sections
+ (struct bfd_link_info *);
+
+extern bfd_boolean bfd_arm_pe_process_before_allocation
+ (bfd *, struct bfd_link_info *, int);
+
+extern bfd_boolean bfd_arm_pe_get_bfd_for_interworking
+ (bfd *, struct bfd_link_info *);
+
+/* ELF ARM Interworking support. Called from linker. */
+extern bfd_boolean bfd_elf32_arm_allocate_interworking_sections
+ (struct bfd_link_info *);
+
+extern bfd_boolean bfd_elf32_arm_process_before_allocation
+ (bfd *, struct bfd_link_info *, int);
+
+void bfd_elf32_arm_set_target_relocs
+ (struct bfd_link_info *, int, char *, int, int);
+
+extern bfd_boolean bfd_elf32_arm_get_bfd_for_interworking
+ (bfd *, struct bfd_link_info *);
+
+extern bfd_boolean bfd_elf32_arm_add_glue_sections_to_bfd
+ (bfd *, struct bfd_link_info *);
+
+/* ELF ARM mapping symbol support */
+extern bfd_boolean bfd_is_arm_mapping_symbol_name
+ (const char * name);
+
+/* ARM Note section processing. */
+extern bfd_boolean bfd_arm_merge_machines
+ (bfd *, bfd *);
+
+extern bfd_boolean bfd_arm_update_notes
+ (bfd *, const char *);
+
+extern unsigned int bfd_arm_get_mach_from_notes
+ (bfd *, const char *);
+
+/* TI COFF load page support. */
+extern void bfd_ticoff_set_section_load_page
+ (struct bfd_section *, int);
+
+extern int bfd_ticoff_get_section_load_page
+ (struct bfd_section *);
+
+/* H8/300 functions. */
+extern bfd_vma bfd_h8300_pad_address
+ (bfd *, bfd_vma);
+
+/* IA64 Itanium code generation. Called from linker. */
+extern void bfd_elf32_ia64_after_parse
+ (int);
+
+extern void bfd_elf64_ia64_after_parse
+ (int);
+
+/* This structure is used for a comdat section, as in PE. A comdat
+ section is associated with a particular symbol. When the linker
+ sees a comdat section, it keeps only one of the sections with a
+ given name and associated with a given symbol. */
+
+struct coff_comdat_info
+{
+ /* The name of the symbol associated with a comdat section. */
+ const char *name;
+
+ /* The local symbol table index of the symbol associated with a
+ comdat section. This is only meaningful to the object file format
+ specific code; it is not an index into the list returned by
+ bfd_canonicalize_symtab. */
+ long symbol;
+};
+
+extern struct coff_comdat_info *bfd_coff_get_comdat_section
+ (bfd *, struct bfd_section *);
+
+/* Extracted from init.c. */
+void bfd_init (void);
+
+/* Extracted from opncls.c. */
+bfd *bfd_fopen (const char *filename, const char *target,
+ const char *mode, int fd);
+
+bfd *bfd_openr (const char *filename, const char *target);
+
+bfd *bfd_fdopenr (const char *filename, const char *target, int fd);
+
+bfd *bfd_openstreamr (const char *, const char *, void *);
+
+bfd *bfd_openr_iovec (const char *filename, const char *target,
+ void *(*open) (struct bfd *nbfd,
+ void *open_closure),
+ void *open_closure,
+ file_ptr (*pread) (struct bfd *nbfd,
+ void *stream,
+ void *buf,
+ file_ptr nbytes,
+ file_ptr offset),
+ int (*close) (struct bfd *nbfd,
+ void *stream));
+
+bfd *bfd_openw (const char *filename, const char *target);
+
+bfd_boolean bfd_close (bfd *abfd);
+
+bfd_boolean bfd_close_all_done (bfd *);
+
+bfd *bfd_create (const char *filename, bfd *templ);
+
+bfd_boolean bfd_make_writable (bfd *abfd);
+
+bfd_boolean bfd_make_readable (bfd *abfd);
+
+unsigned long bfd_calc_gnu_debuglink_crc32
+ (unsigned long crc, const unsigned char *buf, bfd_size_type len);
+
+char *bfd_follow_gnu_debuglink (bfd *abfd, const char *dir);
+
+struct bfd_section *bfd_create_gnu_debuglink_section
+ (bfd *abfd, const char *filename);
+
+bfd_boolean bfd_fill_in_gnu_debuglink_section
+ (bfd *abfd, struct bfd_section *sect, const char *filename);
+
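+/* Sketch of the bfd_openr_iovec callback contract (illustrative only;
+   `my_stream' and `stream_read' are hypothetical): `open' returns an
+   opaque stream cookie, `pread' copies nbytes at offset into buf and
+   returns the count actually read, and `close' releases the cookie.
+
+     static void *
+     my_open (struct bfd *nbfd, void *open_closure)
+     { return open_closure; }
+
+     static file_ptr
+     my_pread (struct bfd *nbfd, void *stream, void *buf,
+               file_ptr nbytes, file_ptr offset)
+     { return stream_read ((struct my_stream *) stream, buf,
+                           nbytes, offset); }
+
+     static int
+     my_close (struct bfd *nbfd, void *stream)
+     { return 0; }
+*/
+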
+/* Extracted from libbfd.c. */
+
+/* Byte swapping macros for user section data. */
+
+#define bfd_put_8(abfd, val, ptr) \
+ ((void) (*((unsigned char *) (ptr)) = (val) & 0xff))
+#define bfd_put_signed_8 \
+ bfd_put_8
+#define bfd_get_8(abfd, ptr) \
+ (*(unsigned char *) (ptr) & 0xff)
+#define bfd_get_signed_8(abfd, ptr) \
+ (((*(unsigned char *) (ptr) & 0xff) ^ 0x80) - 0x80)
+
+#define bfd_put_16(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_putx16, ((val),(ptr)))
+#define bfd_put_signed_16 \
+ bfd_put_16
+#define bfd_get_16(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx16, (ptr))
+#define bfd_get_signed_16(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx_signed_16, (ptr))
+
+#define bfd_put_32(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_putx32, ((val),(ptr)))
+#define bfd_put_signed_32 \
+ bfd_put_32
+#define bfd_get_32(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx32, (ptr))
+#define bfd_get_signed_32(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx_signed_32, (ptr))
+
+#define bfd_put_64(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_putx64, ((val), (ptr)))
+#define bfd_put_signed_64 \
+ bfd_put_64
+#define bfd_get_64(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx64, (ptr))
+#define bfd_get_signed_64(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx_signed_64, (ptr))
+
+#define bfd_get(bits, abfd, ptr) \
+ ((bits) == 8 ? (bfd_vma) bfd_get_8 (abfd, ptr) \
+ : (bits) == 16 ? bfd_get_16 (abfd, ptr) \
+ : (bits) == 32 ? bfd_get_32 (abfd, ptr) \
+ : (bits) == 64 ? bfd_get_64 (abfd, ptr) \
+ : (abort (), (bfd_vma) - 1))
+
+#define bfd_put(bits, abfd, val, ptr) \
+ ((bits) == 8 ? bfd_put_8 (abfd, val, ptr) \
+ : (bits) == 16 ? bfd_put_16 (abfd, val, ptr) \
+ : (bits) == 32 ? bfd_put_32 (abfd, val, ptr) \
+ : (bits) == 64 ? bfd_put_64 (abfd, val, ptr) \
+ : (abort (), (void) 0))
+
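+/* Example: the size-dispatching forms route to the fixed-size macros
+   above, so bfd_put (32, abfd, 0xdeadbeef, ptr) stores the value in the
+   data byte order of `abfd's target, exactly as
+   bfd_put_32 (abfd, 0xdeadbeef, ptr) would. */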
+
+/* Byte swapping macros for file header data. */
+
+#define bfd_h_put_8(abfd, val, ptr) \
+ bfd_put_8 (abfd, val, ptr)
+#define bfd_h_put_signed_8(abfd, val, ptr) \
+ bfd_put_8 (abfd, val, ptr)
+#define bfd_h_get_8(abfd, ptr) \
+ bfd_get_8 (abfd, ptr)
+#define bfd_h_get_signed_8(abfd, ptr) \
+ bfd_get_signed_8 (abfd, ptr)
+
+#define bfd_h_put_16(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_h_putx16, (val, ptr))
+#define bfd_h_put_signed_16 \
+ bfd_h_put_16
+#define bfd_h_get_16(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx16, (ptr))
+#define bfd_h_get_signed_16(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx_signed_16, (ptr))
+
+#define bfd_h_put_32(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_h_putx32, (val, ptr))
+#define bfd_h_put_signed_32 \
+ bfd_h_put_32
+#define bfd_h_get_32(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx32, (ptr))
+#define bfd_h_get_signed_32(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx_signed_32, (ptr))
+
+#define bfd_h_put_64(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_h_putx64, (val, ptr))
+#define bfd_h_put_signed_64 \
+ bfd_h_put_64
+#define bfd_h_get_64(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx64, (ptr))
+#define bfd_h_get_signed_64(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx_signed_64, (ptr))
+
+/* Aliases for the above, which should eventually go away. */
+
+#define H_PUT_64 bfd_h_put_64
+#define H_PUT_32 bfd_h_put_32
+#define H_PUT_16 bfd_h_put_16
+#define H_PUT_8 bfd_h_put_8
+#define H_PUT_S64 bfd_h_put_signed_64
+#define H_PUT_S32 bfd_h_put_signed_32
+#define H_PUT_S16 bfd_h_put_signed_16
+#define H_PUT_S8 bfd_h_put_signed_8
+#define H_GET_64 bfd_h_get_64
+#define H_GET_32 bfd_h_get_32
+#define H_GET_16 bfd_h_get_16
+#define H_GET_8 bfd_h_get_8
+#define H_GET_S64 bfd_h_get_signed_64
+#define H_GET_S32 bfd_h_get_signed_32
+#define H_GET_S16 bfd_h_get_signed_16
+#define H_GET_S8 bfd_h_get_signed_8
+
+
+/* Extracted from bfdio.c. */
+long bfd_get_mtime (bfd *abfd);
+
+long bfd_get_size (bfd *abfd);
+
+/* Extracted from bfdwin.c. */
+/* Extracted from section.c. */
+typedef struct bfd_section
+{
+ /* The name of the section; the name is not a copy, but the same
+ pointer that was passed to bfd_make_section. */
+ const char *name;
+
+ /* A unique sequence number. */
+ int id;
+
+ /* Which section in the bfd; 0..n-1 as sections are created in a bfd. */
+ int index;
+
+ /* The next section in the list belonging to the BFD, or NULL. */
+ struct bfd_section *next;
+
+ /* The previous section in the list belonging to the BFD, or NULL. */
+ struct bfd_section *prev;
+
+ /* The field flags contains attributes of the section. Some
+ flags are read in from the object file, and some are
+ synthesized from other information. */
+ flagword flags;
+
+#define SEC_NO_FLAGS 0x000
+
+ /* Tells the OS to allocate space for this section when loading.
+ This is clear for a section containing debug information only. */
+#define SEC_ALLOC 0x001
+
+ /* Tells the OS to load the section from the file when loading.
+ This is clear for a .bss section. */
+#define SEC_LOAD 0x002
+
+ /* The section contains data still to be relocated, so there is
+ some relocation information too. */
+#define SEC_RELOC 0x004
+
+ /* A signal to the OS that the section contains read only data. */
+#define SEC_READONLY 0x008
+
+ /* The section contains code only. */
+#define SEC_CODE 0x010
+
+ /* The section contains data only. */
+#define SEC_DATA 0x020
+
+ /* The section will reside in ROM. */
+#define SEC_ROM 0x040
+
+ /* The section contains constructor information. This section
+ type is used by the linker to create lists of constructors and
+ destructors used by <<g++>>. When a back end sees a symbol
+ which should be used in a constructor list, it creates a new
+ section for the type of name (e.g., <<__CTOR_LIST__>>), attaches
+ the symbol to it, and builds a relocation. To build the lists
+ of constructors, all the linker has to do is catenate all the
+ sections called <<__CTOR_LIST__>> and relocate the data
+ contained within - exactly the operations it would perform on
+ standard data. */
+#define SEC_CONSTRUCTOR 0x080
+
+ /* The section has contents - a data section could be
+ <<SEC_ALLOC>> | <<SEC_HAS_CONTENTS>>; a debug section could be
+ <<SEC_HAS_CONTENTS>>. */
+#define SEC_HAS_CONTENTS 0x100
+
+ /* An instruction to the linker to not output the section
+ even if it has information which would normally be written. */
+#define SEC_NEVER_LOAD 0x200
+
+ /* The section contains thread local data. */
+#define SEC_THREAD_LOCAL 0x400
+
+ /* The section has GOT references. This flag is only for the
+ linker, and is currently only used by the elf32-hppa back end.
+ It will be set if global offset table references were detected
+ in this section, which indicate to the linker that the section
+ contains PIC code, and must be handled specially when doing a
+ static link. */
+#define SEC_HAS_GOT_REF 0x800
+
+ /* The section contains common symbols (symbols may be defined
+ multiple times, the value of a symbol is the amount of
+ space it requires, and the largest symbol value is the one
+ used). Most targets have exactly one of these (which we
+ translate to bfd_com_section_ptr), but ECOFF has two. */
+#define SEC_IS_COMMON 0x1000
+
+ /* The section contains only debugging information. For
+ example, this is set for ELF .debug and .stab sections.
+ strip tests this flag to see if a section can be
+ discarded. */
+#define SEC_DEBUGGING 0x2000
+
+ /* The contents of this section are held in memory pointed to
+ by the contents field. This is checked by bfd_get_section_contents,
+ and the data is retrieved from memory if appropriate. */
+#define SEC_IN_MEMORY 0x4000
+
+ /* The contents of this section are to be excluded by the
+ linker for executable and shared objects unless those
+ objects are to be further relocated. */
+#define SEC_EXCLUDE 0x8000
+
+ /* The contents of this section are to be sorted based on the sum of
+ the symbol and addend values specified by the associated relocation
+ entries. Entries without associated relocation entries will be
+ appended to the end of the section in an unspecified order. */
+#define SEC_SORT_ENTRIES 0x10000
+
+ /* When linking, duplicate sections of the same name should be
+ discarded, rather than being combined into a single section as
+ is usually done. This is similar to how common symbols are
+ handled. See SEC_LINK_DUPLICATES below. */
+#define SEC_LINK_ONCE 0x20000
+
+ /* If SEC_LINK_ONCE is set, this bitfield describes how the linker
+ should handle duplicate sections. */
+#define SEC_LINK_DUPLICATES 0x40000
+
+ /* This value for SEC_LINK_DUPLICATES means that duplicate
+ sections with the same name should simply be discarded. */
+#define SEC_LINK_DUPLICATES_DISCARD 0x0
+
+ /* This value for SEC_LINK_DUPLICATES means that the linker
+ should warn if there are any duplicate sections, although
+ it should still only link one copy. */
+#define SEC_LINK_DUPLICATES_ONE_ONLY 0x80000
+
+ /* This value for SEC_LINK_DUPLICATES means that the linker
+ should warn if any duplicate sections are a different size. */
+#define SEC_LINK_DUPLICATES_SAME_SIZE 0x100000
+
+ /* This value for SEC_LINK_DUPLICATES means that the linker
+ should warn if any duplicate sections contain different
+ contents. */
+#define SEC_LINK_DUPLICATES_SAME_CONTENTS \
+ (SEC_LINK_DUPLICATES_ONE_ONLY | SEC_LINK_DUPLICATES_SAME_SIZE)
+
+ /* This section was created by the linker as part of dynamic
+ relocation or other arcane processing. It is skipped when
+ going through the first-pass output, trusting that someone
+ else up the line will take care of it later. */
+#define SEC_LINKER_CREATED 0x200000
+
+ /* This section should not be subject to garbage collection. */
+#define SEC_KEEP 0x400000
+
+ /* This section contains "short" data, and should be placed
+ "near" the GP. */
+#define SEC_SMALL_DATA 0x800000
+
+ /* Attempt to merge identical entities in the section.
+ Entity size is given in the entsize field. */
+#define SEC_MERGE 0x1000000
+
+ /* If given with SEC_MERGE, entities to merge are zero terminated
+ strings where entsize specifies character size instead of fixed
+ size entries. */
+#define SEC_STRINGS 0x2000000
+
+ /* This section contains data about section groups. */
+#define SEC_GROUP 0x4000000
+
+ /* The section is a COFF shared library section. This flag is
+ only for the linker. If this type of section appears in
+ the input file, the linker must copy it to the output file
+ without changing the vma or size. FIXME: Although this
+ was originally intended to be general, it really is COFF
+ specific (and the flag was renamed to indicate this). It
+ might be cleaner to have some more general mechanism to
+ allow the back end to control what the linker does with
+ sections. */
+#define SEC_COFF_SHARED_LIBRARY 0x10000000
+
+ /* This section contains data which may be shared with other
+ executables or shared objects. This is for COFF only. */
+#define SEC_COFF_SHARED 0x20000000
+
+ /* When a section with this flag is being linked, then if the size of
+ the input section is less than a page, it should not cross a page
+ boundary. If the size of the input section is one page or more,
+ it should be aligned on a page boundary. This is for TI
+ TMS320C54X only. */
+#define SEC_TIC54X_BLOCK 0x40000000
+
+ /* Conditionally link this section; do not link if there are no
+ references found to any symbol in the section. This is for TI
+ TMS320C54X only. */
+#define SEC_TIC54X_CLINK 0x80000000
+
+ /* End of section flags. */
+
+ /* Some internal packed boolean fields. */
+
+ /* See the vma field. */
+ unsigned int user_set_vma : 1;
+
+ /* A mark flag used by some of the linker backends. */
+ unsigned int linker_mark : 1;
+
+ /* Another mark flag used by some of the linker backends. Set for
+ output sections that have an input section. */
+ unsigned int linker_has_input : 1;
+
+ /* Mark flags used by some linker backends for garbage collection. */
+ unsigned int gc_mark : 1;
+ unsigned int gc_mark_from_eh : 1;
+
+ /* The following flags are used by the ELF linker. */
+
+ /* Mark sections which have been allocated to segments. */
+ unsigned int segment_mark : 1;
+
+ /* Type of sec_info information. */
+ unsigned int sec_info_type:3;
+#define ELF_INFO_TYPE_NONE 0
+#define ELF_INFO_TYPE_STABS 1
+#define ELF_INFO_TYPE_MERGE 2
+#define ELF_INFO_TYPE_EH_FRAME 3
+#define ELF_INFO_TYPE_JUST_SYMS 4
+
+ /* Nonzero if this section uses RELA relocations, rather than REL. */
+ unsigned int use_rela_p:1;
+
+ /* Bits used by various backends. The generic code doesn't touch
+ these fields. */
+
+ /* Nonzero if this section has TLS related relocations. */
+ unsigned int has_tls_reloc:1;
+
+ /* Nonzero if this section has a gp reloc. */
+ unsigned int has_gp_reloc:1;
+
+ /* Nonzero if this section needs the relax finalize pass. */
+ unsigned int need_finalize_relax:1;
+
+ /* Whether relocations have been processed. */
+ unsigned int reloc_done : 1;
+
+ /* End of internal packed boolean fields. */
+
+ /* The virtual memory address of the section - where it will be
+ at run time. The symbols are relocated against this. The
+ user_set_vma flag is maintained by bfd; if it's not set, the
+ backend can assign addresses (for example, in <<a.out>>, where
+ the default address for <<.data>> is dependent on the specific
+ target and various flags). */
+ bfd_vma vma;
+
+ /* The load address of the section - where it would be in a
+ rom image; really only used for writing section header
+ information. */
+ bfd_vma lma;
+
+ /* The size of the section in octets, as it will be output.
+ Contains a value even if the section has no contents (e.g., the
+ size of <<.bss>>). */
+ bfd_size_type size;
+
+ /* For input sections, the original size on disk of the section, in
+ octets. This field is used by the linker relaxation code. It is
+ currently only set for sections where the linker relaxation scheme
+ doesn't cache altered section and reloc contents (stabs, eh_frame,
+ SEC_MERGE, some coff relaxing targets), and thus the original size
+ needs to be kept to read the section multiple times.
+ For output sections, rawsize holds the section size calculated on
+ a previous linker relaxation pass. */
+ bfd_size_type rawsize;
+
+ /* If this section is going to be output, then this value is the
+ offset in *bytes* into the output section of the first byte in the
+ input section (byte ==> smallest addressable unit on the
+ target). In most cases, if this was going to start at the
+ 100th octet (8-bit quantity) in the output section, this value
+ would be 100. However, if the target byte size is 16 bits
+ (bfd_octets_per_byte is "2"), this value would be 50. */
+ bfd_vma output_offset;
+
+ /* The output section through which to map on output. */
+ struct bfd_section *output_section;
+
+ /* The alignment requirement of the section, as an exponent of 2 -
+ e.g., 3 aligns to 2^3 (or 8). */
+ unsigned int alignment_power;
+
+ /* If an input section, a pointer to a vector of relocation
+ records for the data in this section. */
+ struct reloc_cache_entry *relocation;
+
+ /* If an output section, a pointer to a vector of pointers to
+ relocation records for the data in this section. */
+ struct reloc_cache_entry **orelocation;
+
+ /* The number of relocation records in one of the above. */
+ unsigned reloc_count;
+
+ /* Information below is back end specific - and not always used
+ or updated. */
+
+ /* File position of section data. */
+ file_ptr filepos;
+
+ /* File position of relocation info. */
+ file_ptr rel_filepos;
+
+ /* File position of line data. */
+ file_ptr line_filepos;
+
+ /* Pointer to data for applications. */
+ void *userdata;
+
+ /* If the SEC_IN_MEMORY flag is set, this points to the actual
+ contents. */
+ unsigned char *contents;
+
+ /* Attached line number information. */
+ alent *lineno;
+
+ /* Number of line number records. */
+ unsigned int lineno_count;
+
+ /* Entity size for merging purposes. */
+ unsigned int entsize;
+
+ /* Points to the kept section if this section is a link-once section,
+ and is discarded. */
+ struct bfd_section *kept_section;
+
+ /* When a section is being output, this value changes as more
+ linenumbers are written out. */
+ file_ptr moving_line_filepos;
+
+ /* What the section number is in the target world. */
+ int target_index;
+
+ void *used_by_bfd;
+
+ /* If this is a constructor section then here is a list of the
+ relocations created to relocate items within it. */
+ struct relent_chain *constructor_chain;
+
+ /* The BFD which owns the section. */
+ bfd *owner;
+
+ /* A symbol which points at this section only. */
+ struct bfd_symbol *symbol;
+ struct bfd_symbol **symbol_ptr_ptr;
+
+ /* Early in the link process, map_head and map_tail are used to build
+ a list of input sections attached to an output section. Later,
+ output sections use these fields for a list of bfd_link_order
+ structs. */
+ union {
+ struct bfd_link_order *link_order;
+ struct bfd_section *s;
+ } map_head, map_tail;
+} asection;
+
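+/* Illustrative sketch, not part of the upstream header: walking a
+   bfd's section chain through the next pointers and testing flag
+   bits.  The list head lives in the bfd itself (see the
+   bfd_section_list_* macros below); "abfd" is assumed to be an
+   already opened and format-checked bfd.  */
+#if 0
+static void
+example_walk_sections (bfd *abfd)
+{
+  asection *sec;
+  for (sec = abfd->sections; sec != NULL; sec = sec->next)
+    if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD))
+      {
+        /* sec->vma .. sec->vma + sec->size is loaded at run time.  */
+      }
+}
+#endif
+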
+/* These sections are global, and are managed by BFD. The application
+ and target back end are not permitted to change the values in
+ these sections. New code should use the section_ptr macros rather
+ than referring directly to the const sections. The const sections
+ may eventually vanish. */
+#define BFD_ABS_SECTION_NAME "*ABS*"
+#define BFD_UND_SECTION_NAME "*UND*"
+#define BFD_COM_SECTION_NAME "*COM*"
+#define BFD_IND_SECTION_NAME "*IND*"
+
+/* The absolute section. */
+extern asection bfd_abs_section;
+#define bfd_abs_section_ptr ((asection *) &bfd_abs_section)
+#define bfd_is_abs_section(sec) ((sec) == bfd_abs_section_ptr)
+/* Pointer to the undefined section. */
+extern asection bfd_und_section;
+#define bfd_und_section_ptr ((asection *) &bfd_und_section)
+#define bfd_is_und_section(sec) ((sec) == bfd_und_section_ptr)
+/* Pointer to the common section. */
+extern asection bfd_com_section;
+#define bfd_com_section_ptr ((asection *) &bfd_com_section)
+/* Pointer to the indirect section. */
+extern asection bfd_ind_section;
+#define bfd_ind_section_ptr ((asection *) &bfd_ind_section)
+#define bfd_is_ind_section(sec) ((sec) == bfd_ind_section_ptr)
+
+#define bfd_is_const_section(SEC) \
+ ( ((SEC) == bfd_abs_section_ptr) \
+ || ((SEC) == bfd_und_section_ptr) \
+ || ((SEC) == bfd_com_section_ptr) \
+ || ((SEC) == bfd_ind_section_ptr))
+
+extern const struct bfd_symbol * const bfd_abs_symbol;
+extern const struct bfd_symbol * const bfd_com_symbol;
+extern const struct bfd_symbol * const bfd_und_symbol;
+extern const struct bfd_symbol * const bfd_ind_symbol;
+
+/* Macros to handle insertion and deletion of a bfd's sections. These
+ only handle the list pointers, i.e. do not adjust section_count,
+ target_index etc. */
+#define bfd_section_list_remove(ABFD, S) \
+ do \
+ { \
+ asection *_s = S; \
+ asection *_next = _s->next; \
+ asection *_prev = _s->prev; \
+ if (_prev) \
+ _prev->next = _next; \
+ else \
+ (ABFD)->sections = _next; \
+ if (_next) \
+ _next->prev = _prev; \
+ else \
+ (ABFD)->section_last = _prev; \
+ } \
+ while (0)
+#define bfd_section_list_append(ABFD, S) \
+ do \
+ { \
+ asection *_s = S; \
+ bfd *_abfd = ABFD; \
+ _s->next = NULL; \
+ if (_abfd->section_last) \
+ { \
+ _s->prev = _abfd->section_last; \
+ _abfd->section_last->next = _s; \
+ } \
+ else \
+ { \
+ _s->prev = NULL; \
+ _abfd->sections = _s; \
+ } \
+ _abfd->section_last = _s; \
+ } \
+ while (0)
+#define bfd_section_list_prepend(ABFD, S) \
+ do \
+ { \
+ asection *_s = S; \
+ bfd *_abfd = ABFD; \
+ _s->prev = NULL; \
+ if (_abfd->sections) \
+ { \
+ _s->next = _abfd->sections; \
+ _abfd->sections->prev = _s; \
+ } \
+ else \
+ { \
+ _s->next = NULL; \
+ _abfd->section_last = _s; \
+ } \
+ _abfd->sections = _s; \
+ } \
+ while (0)
+#define bfd_section_list_insert_after(ABFD, A, S) \
+ do \
+ { \
+ asection *_a = A; \
+ asection *_s = S; \
+ asection *_next = _a->next; \
+ _s->next = _next; \
+ _s->prev = _a; \
+ _a->next = _s; \
+ if (_next) \
+ _next->prev = _s; \
+ else \
+ (ABFD)->section_last = _s; \
+ } \
+ while (0)
+#define bfd_section_list_insert_before(ABFD, B, S) \
+ do \
+ { \
+ asection *_b = B; \
+ asection *_s = S; \
+ asection *_prev = _b->prev; \
+ _s->prev = _prev; \
+ _s->next = _b; \
+ _b->prev = _s; \
+ if (_prev) \
+ _prev->next = _s; \
+ else \
+ (ABFD)->sections = _s; \
+ } \
+ while (0)
+#define bfd_section_removed_from_list(ABFD, S) \
+ ((S)->next == NULL ? (ABFD)->section_last != (S) : (S)->next->prev != (S))
+
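+/* Illustrative sketch, not part of the upstream header: typical use
+   of the list macros above.  They adjust only the chain pointers, so
+   the caller remains responsible for section_count, target_index and
+   friends.  */
+#if 0
+static void
+example_move_to_tail (bfd *abfd, asection *sec)
+{
+  bfd_section_list_remove (abfd, sec);
+  bfd_section_list_append (abfd, sec);  /* sec is now the last section */
+}
+#endif
+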
+void bfd_section_list_clear (bfd *);
+
+asection *bfd_get_section_by_name (bfd *abfd, const char *name);
+
+asection *bfd_get_section_by_name_if
+ (bfd *abfd,
+ const char *name,
+ bfd_boolean (*func) (bfd *abfd, asection *sect, void *obj),
+ void *obj);
+
+char *bfd_get_unique_section_name
+ (bfd *abfd, const char *templat, int *count);
+
+asection *bfd_make_section_old_way (bfd *abfd, const char *name);
+
+asection *bfd_make_section_anyway_with_flags
+ (bfd *abfd, const char *name, flagword flags);
+
+asection *bfd_make_section_anyway (bfd *abfd, const char *name);
+
+asection *bfd_make_section_with_flags
+ (bfd *, const char *name, flagword flags);
+
+asection *bfd_make_section (bfd *, const char *name);
+
+bfd_boolean bfd_set_section_flags
+ (bfd *abfd, asection *sec, flagword flags);
+
+void bfd_map_over_sections
+ (bfd *abfd,
+ void (*func) (bfd *abfd, asection *sect, void *obj),
+ void *obj);
+
+asection *bfd_sections_find_if
+ (bfd *abfd,
+ bfd_boolean (*operation) (bfd *abfd, asection *sect, void *obj),
+ void *obj);
+
+bfd_boolean bfd_set_section_size
+ (bfd *abfd, asection *sec, bfd_size_type val);
+
+bfd_boolean bfd_set_section_contents
+ (bfd *abfd, asection *section, const void *data,
+ file_ptr offset, bfd_size_type count);
+
+bfd_boolean bfd_get_section_contents
+ (bfd *abfd, asection *section, void *location, file_ptr offset,
+ bfd_size_type count);
+
+bfd_boolean bfd_malloc_and_get_section
+ (bfd *abfd, asection *section, bfd_byte **buf);
+
+bfd_boolean bfd_copy_private_section_data
+ (bfd *ibfd, asection *isec, bfd *obfd, asection *osec);
+
+#define bfd_copy_private_section_data(ibfd, isection, obfd, osection) \
+ BFD_SEND (obfd, _bfd_copy_private_section_data, \
+ (ibfd, isection, obfd, osection))
+bfd_boolean bfd_generic_is_group_section (bfd *, const asection *sec);
+
+bfd_boolean bfd_generic_discard_group (bfd *abfd, asection *group);
+
+/* Extracted from archures.c. */
+enum bfd_architecture
+{
+ bfd_arch_unknown, /* File arch not known. */
+ bfd_arch_obscure, /* Arch known, not one of these. */
+ bfd_arch_m68k, /* Motorola 68xxx */
+#define bfd_mach_m68000 1
+#define bfd_mach_m68008 2
+#define bfd_mach_m68010 3
+#define bfd_mach_m68020 4
+#define bfd_mach_m68030 5
+#define bfd_mach_m68040 6
+#define bfd_mach_m68060 7
+#define bfd_mach_cpu32 8
+#define bfd_mach_mcf5200 9
+#define bfd_mach_mcf5206e 10
+#define bfd_mach_mcf5307 11
+#define bfd_mach_mcf5407 12
+#define bfd_mach_mcf528x 13
+#define bfd_mach_mcfv4e 14
+#define bfd_mach_mcf521x 15
+#define bfd_mach_mcf5249 16
+#define bfd_mach_mcf547x 17
+#define bfd_mach_mcf548x 18
+ bfd_arch_vax, /* DEC Vax */
+ bfd_arch_i960, /* Intel 960 */
+ /* The order of the following is important.
+ A lower number indicates a machine type that
+ only accepts a subset of the instructions
+ available to machines with higher numbers.
+ The exception is the "ca", which is
+ incompatible with all other machines except
+ "core". */
+
+#define bfd_mach_i960_core 1
+#define bfd_mach_i960_ka_sa 2
+#define bfd_mach_i960_kb_sb 3
+#define bfd_mach_i960_mc 4
+#define bfd_mach_i960_xa 5
+#define bfd_mach_i960_ca 6
+#define bfd_mach_i960_jx 7
+#define bfd_mach_i960_hx 8
+
+ bfd_arch_or32, /* OpenRISC 32 */
+
+ bfd_arch_a29k, /* AMD 29000 */
+ bfd_arch_sparc, /* SPARC */
+#define bfd_mach_sparc 1
+/* The difference between v8plus and v9 is that v9 is a true 64 bit env. */
+#define bfd_mach_sparc_sparclet 2
+#define bfd_mach_sparc_sparclite 3
+#define bfd_mach_sparc_v8plus 4
+#define bfd_mach_sparc_v8plusa 5 /* with ultrasparc add'ns. */
+#define bfd_mach_sparc_sparclite_le 6
+#define bfd_mach_sparc_v9 7
+#define bfd_mach_sparc_v9a 8 /* with ultrasparc add'ns. */
+#define bfd_mach_sparc_v8plusb 9 /* with cheetah add'ns. */
+#define bfd_mach_sparc_v9b 10 /* with cheetah add'ns. */
+/* Nonzero if MACH has the v9 instruction set. */
+#define bfd_mach_sparc_v9_p(mach) \
+ ((mach) >= bfd_mach_sparc_v8plus && (mach) <= bfd_mach_sparc_v9b \
+ && (mach) != bfd_mach_sparc_sparclite_le)
+/* Nonzero if MACH is a 64 bit sparc architecture. */
+#define bfd_mach_sparc_64bit_p(mach) \
+ ((mach) >= bfd_mach_sparc_v9 && (mach) != bfd_mach_sparc_v8plusb)
+ bfd_arch_mips, /* MIPS Rxxxx */
+#define bfd_mach_mips3000 3000
+#define bfd_mach_mips3900 3900
+#define bfd_mach_mips4000 4000
+#define bfd_mach_mips4010 4010
+#define bfd_mach_mips4100 4100
+#define bfd_mach_mips4111 4111
+#define bfd_mach_mips4120 4120
+#define bfd_mach_mips4300 4300
+#define bfd_mach_mips4400 4400
+#define bfd_mach_mips4600 4600
+#define bfd_mach_mips4650 4650
+#define bfd_mach_mips5000 5000
+#define bfd_mach_mips5400 5400
+#define bfd_mach_mips5500 5500
+#define bfd_mach_mips6000 6000
+#define bfd_mach_mips7000 7000
+#define bfd_mach_mips8000 8000
+#define bfd_mach_mips9000 9000
+#define bfd_mach_mips10000 10000
+#define bfd_mach_mips12000 12000
+#define bfd_mach_mips16 16
+#define bfd_mach_mips5 5
+#define bfd_mach_mips_sb1 12310201 /* octal 'SB', 01 */
+#define bfd_mach_mipsisa32 32
+#define bfd_mach_mipsisa32r2 33
+#define bfd_mach_mipsisa64 64
+#define bfd_mach_mipsisa64r2 65
+ bfd_arch_i386, /* Intel 386 */
+#define bfd_mach_i386_i386 1
+#define bfd_mach_i386_i8086 2
+#define bfd_mach_i386_i386_intel_syntax 3
+#define bfd_mach_x86_64 64
+#define bfd_mach_x86_64_intel_syntax 65
+ bfd_arch_we32k, /* AT&T WE32xxx */
+ bfd_arch_tahoe, /* CCI/Harris Tahoe */
+ bfd_arch_i860, /* Intel 860 */
+ bfd_arch_i370, /* IBM 360/370 Mainframes */
+ bfd_arch_romp, /* IBM ROMP PC/RT */
+ bfd_arch_alliant, /* Alliant */
+ bfd_arch_convex, /* Convex */
+ bfd_arch_m88k, /* Motorola 88xxx */
+ bfd_arch_m98k, /* Motorola 98xxx */
+ bfd_arch_pyramid, /* Pyramid Technology */
+ bfd_arch_h8300, /* Renesas H8/300 (formerly Hitachi H8/300) */
+#define bfd_mach_h8300 1
+#define bfd_mach_h8300h 2
+#define bfd_mach_h8300s 3
+#define bfd_mach_h8300hn 4
+#define bfd_mach_h8300sn 5
+#define bfd_mach_h8300sx 6
+#define bfd_mach_h8300sxn 7
+ bfd_arch_pdp11, /* DEC PDP-11 */
+ bfd_arch_powerpc, /* PowerPC */
+#define bfd_mach_ppc 32
+#define bfd_mach_ppc64 64
+#define bfd_mach_ppc_403 403
+#define bfd_mach_ppc_403gc 4030
+#define bfd_mach_ppc_505 505
+#define bfd_mach_ppc_601 601
+#define bfd_mach_ppc_602 602
+#define bfd_mach_ppc_603 603
+#define bfd_mach_ppc_ec603e 6031
+#define bfd_mach_ppc_604 604
+#define bfd_mach_ppc_620 620
+#define bfd_mach_ppc_630 630
+#define bfd_mach_ppc_750 750
+#define bfd_mach_ppc_860 860
+#define bfd_mach_ppc_a35 35
+#define bfd_mach_ppc_rs64ii 642
+#define bfd_mach_ppc_rs64iii 643
+#define bfd_mach_ppc_7400 7400
+#define bfd_mach_ppc_e500 500
+ bfd_arch_rs6000, /* IBM RS/6000 */
+#define bfd_mach_rs6k 6000
+#define bfd_mach_rs6k_rs1 6001
+#define bfd_mach_rs6k_rsc 6003
+#define bfd_mach_rs6k_rs2 6002
+ bfd_arch_hppa, /* HP PA RISC */
+#define bfd_mach_hppa10 10
+#define bfd_mach_hppa11 11
+#define bfd_mach_hppa20 20
+#define bfd_mach_hppa20w 25
+ bfd_arch_d10v, /* Mitsubishi D10V */
+#define bfd_mach_d10v 1
+#define bfd_mach_d10v_ts2 2
+#define bfd_mach_d10v_ts3 3
+ bfd_arch_d30v, /* Mitsubishi D30V */
+ bfd_arch_dlx, /* DLX */
+ bfd_arch_m68hc11, /* Motorola 68HC11 */
+ bfd_arch_m68hc12, /* Motorola 68HC12 */
+#define bfd_mach_m6812_default 0
+#define bfd_mach_m6812 1
+#define bfd_mach_m6812s 2
+ bfd_arch_z8k, /* Zilog Z8000 */
+#define bfd_mach_z8001 1
+#define bfd_mach_z8002 2
+ bfd_arch_h8500, /* Renesas H8/500 (formerly Hitachi H8/500) */
+ bfd_arch_sh, /* Renesas / SuperH SH (formerly Hitachi SH) */
+#define bfd_mach_sh 1
+#define bfd_mach_sh2 0x20
+#define bfd_mach_sh_dsp 0x2d
+#define bfd_mach_sh2a 0x2a
+#define bfd_mach_sh2a_nofpu 0x2b
+#define bfd_mach_sh2a_nofpu_or_sh4_nommu_nofpu 0x2a1
+#define bfd_mach_sh2a_nofpu_or_sh3_nommu 0x2a2
+#define bfd_mach_sh2a_or_sh4 0x2a3
+#define bfd_mach_sh2a_or_sh3e 0x2a4
+#define bfd_mach_sh2e 0x2e
+#define bfd_mach_sh3 0x30
+#define bfd_mach_sh3_nommu 0x31
+#define bfd_mach_sh3_dsp 0x3d
+#define bfd_mach_sh3e 0x3e
+#define bfd_mach_sh4 0x40
+#define bfd_mach_sh4_nofpu 0x41
+#define bfd_mach_sh4_nommu_nofpu 0x42
+#define bfd_mach_sh4a 0x4a
+#define bfd_mach_sh4a_nofpu 0x4b
+#define bfd_mach_sh4al_dsp 0x4d
+#define bfd_mach_sh5 0x50
+ bfd_arch_alpha, /* Dec Alpha */
+#define bfd_mach_alpha_ev4 0x10
+#define bfd_mach_alpha_ev5 0x20
+#define bfd_mach_alpha_ev6 0x30
+ bfd_arch_arm, /* Advanced Risc Machines ARM. */
+#define bfd_mach_arm_unknown 0
+#define bfd_mach_arm_2 1
+#define bfd_mach_arm_2a 2
+#define bfd_mach_arm_3 3
+#define bfd_mach_arm_3M 4
+#define bfd_mach_arm_4 5
+#define bfd_mach_arm_4T 6
+#define bfd_mach_arm_5 7
+#define bfd_mach_arm_5T 8
+#define bfd_mach_arm_5TE 9
+#define bfd_mach_arm_XScale 10
+#define bfd_mach_arm_ep9312 11
+#define bfd_mach_arm_iWMMXt 12
+ bfd_arch_ns32k, /* National Semiconductors ns32000 */
+ bfd_arch_w65, /* WDC 65816 */
+ bfd_arch_tic30, /* Texas Instruments TMS320C30 */
+ bfd_arch_tic4x, /* Texas Instruments TMS320C3X/4X */
+#define bfd_mach_tic3x 30
+#define bfd_mach_tic4x 40
+ bfd_arch_tic54x, /* Texas Instruments TMS320C54X */
+ bfd_arch_tic80, /* TI TMS320c80 (MVP) */
+ bfd_arch_v850, /* NEC V850 */
+#define bfd_mach_v850 1
+#define bfd_mach_v850e 'E'
+#define bfd_mach_v850e1 '1'
+ bfd_arch_arc, /* ARC Cores */
+#define bfd_mach_arc_5 5
+#define bfd_mach_arc_6 6
+#define bfd_mach_arc_7 7
+#define bfd_mach_arc_8 8
+ bfd_arch_m32c, /* Renesas M16C/M32C. */
+#define bfd_mach_m16c 0x75
+#define bfd_mach_m32c 0x78
+ bfd_arch_m32r, /* Renesas M32R (formerly Mitsubishi M32R/D) */
+#define bfd_mach_m32r 1 /* For backwards compatibility. */
+#define bfd_mach_m32rx 'x'
+#define bfd_mach_m32r2 '2'
+ bfd_arch_mn10200, /* Matsushita MN10200 */
+ bfd_arch_mn10300, /* Matsushita MN10300 */
+#define bfd_mach_mn10300 300
+#define bfd_mach_am33 330
+#define bfd_mach_am33_2 332
+ bfd_arch_fr30,
+#define bfd_mach_fr30 0x46523330
+ bfd_arch_frv,
+#define bfd_mach_frv 1
+#define bfd_mach_frvsimple 2
+#define bfd_mach_fr300 300
+#define bfd_mach_fr400 400
+#define bfd_mach_fr450 450
+#define bfd_mach_frvtomcat 499 /* fr500 prototype */
+#define bfd_mach_fr500 500
+#define bfd_mach_fr550 550
+ bfd_arch_mcore,
+ bfd_arch_ia64, /* HP/Intel ia64 */
+#define bfd_mach_ia64_elf64 64
+#define bfd_mach_ia64_elf32 32
+ bfd_arch_ip2k, /* Ubicom IP2K microcontrollers. */
+#define bfd_mach_ip2022 1
+#define bfd_mach_ip2022ext 2
+ bfd_arch_iq2000, /* Vitesse IQ2000. */
+#define bfd_mach_iq2000 1
+#define bfd_mach_iq10 2
+ bfd_arch_ms1,
+#define bfd_mach_ms1 1
+#define bfd_mach_mrisc2 2
+ bfd_arch_pj,
+ bfd_arch_avr, /* Atmel AVR microcontrollers. */
+#define bfd_mach_avr1 1
+#define bfd_mach_avr2 2
+#define bfd_mach_avr3 3
+#define bfd_mach_avr4 4
+#define bfd_mach_avr5 5
+ bfd_arch_cr16c, /* National Semiconductor CompactRISC. */
+#define bfd_mach_cr16c 1
+ bfd_arch_crx, /* National Semiconductor CRX. */
+#define bfd_mach_crx 1
+ bfd_arch_cris, /* Axis CRIS */
+#define bfd_mach_cris_v0_v10 255
+#define bfd_mach_cris_v32 32
+#define bfd_mach_cris_v10_v32 1032
+ bfd_arch_s390, /* IBM s390 */
+#define bfd_mach_s390_31 31
+#define bfd_mach_s390_64 64
+ bfd_arch_openrisc, /* OpenRISC */
+ bfd_arch_mmix, /* Donald Knuth's educational processor. */
+ bfd_arch_xstormy16,
+#define bfd_mach_xstormy16 1
+ bfd_arch_msp430, /* Texas Instruments MSP430 architecture. */
+#define bfd_mach_msp11 11
+#define bfd_mach_msp110 110
+#define bfd_mach_msp12 12
+#define bfd_mach_msp13 13
+#define bfd_mach_msp14 14
+#define bfd_mach_msp15 15
+#define bfd_mach_msp16 16
+#define bfd_mach_msp31 31
+#define bfd_mach_msp32 32
+#define bfd_mach_msp33 33
+#define bfd_mach_msp41 41
+#define bfd_mach_msp42 42
+#define bfd_mach_msp43 43
+#define bfd_mach_msp44 44
+ bfd_arch_xtensa, /* Tensilica's Xtensa cores. */
+#define bfd_mach_xtensa 1
+ bfd_arch_maxq, /* Dallas MAXQ 10/20 */
+#define bfd_mach_maxq10 10
+#define bfd_mach_maxq20 20
+ bfd_arch_last
+ };
+
+typedef struct bfd_arch_info
+{
+ int bits_per_word;
+ int bits_per_address;
+ int bits_per_byte;
+ enum bfd_architecture arch;
+ unsigned long mach;
+ const char *arch_name;
+ const char *printable_name;
+ unsigned int section_align_power;
+ /* TRUE if this is the default machine for the architecture.
+ The default arch should be the first entry for an arch so that
+ all the entries for that arch can be accessed via <<next>>. */
+ bfd_boolean the_default;
+ const struct bfd_arch_info * (*compatible)
+ (const struct bfd_arch_info *a, const struct bfd_arch_info *b);
+
+ bfd_boolean (*scan) (const struct bfd_arch_info *, const char *);
+
+ const struct bfd_arch_info *next;
+}
+bfd_arch_info_type;
+
+const char *bfd_printable_name (bfd *abfd);
+
+const bfd_arch_info_type *bfd_scan_arch (const char *string);
+
+const char **bfd_arch_list (void);
+
+const bfd_arch_info_type *bfd_arch_get_compatible
+ (const bfd *abfd, const bfd *bbfd, bfd_boolean accept_unknowns);
+
+void bfd_set_arch_info (bfd *abfd, const bfd_arch_info_type *arg);
+
+enum bfd_architecture bfd_get_arch (bfd *abfd);
+
+unsigned long bfd_get_mach (bfd *abfd);
+
+unsigned int bfd_arch_bits_per_byte (bfd *abfd);
+
+unsigned int bfd_arch_bits_per_address (bfd *abfd);
+
+const bfd_arch_info_type *bfd_get_arch_info (bfd *abfd);
+
+const bfd_arch_info_type *bfd_lookup_arch
+ (enum bfd_architecture arch, unsigned long machine);
+
+const char *bfd_printable_arch_mach
+ (enum bfd_architecture arch, unsigned long machine);
+
+unsigned int bfd_octets_per_byte (bfd *abfd);
+
+unsigned int bfd_arch_mach_octets_per_byte
+ (enum bfd_architecture arch, unsigned long machine);
+
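+/* Illustrative sketch, not part of the upstream header: querying the
+   architecture table through the accessors above.  The printable
+   name shown in the comment is what BFDs of this vintage report for
+   x86-64.  */
+#if 0
+static void
+example_lookup_arch (void)
+{
+  const bfd_arch_info_type *info
+    = bfd_lookup_arch (bfd_arch_i386, bfd_mach_x86_64);
+  if (info != NULL)
+    {
+      /* info->printable_name is "i386:x86-64" here.  */
+    }
+}
+#endif
+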
+/* Extracted from reloc.c. */
+typedef enum bfd_reloc_status
+{
+ /* No errors detected. */
+ bfd_reloc_ok,
+
+ /* The relocation was performed, but there was an overflow. */
+ bfd_reloc_overflow,
+
+ /* The address to relocate was not within the section supplied. */
+ bfd_reloc_outofrange,
+
+ /* Used by special functions. */
+ bfd_reloc_continue,
+
+ /* Unsupported relocation size requested. */
+ bfd_reloc_notsupported,
+
+ /* Unused. */
+ bfd_reloc_other,
+
+ /* The symbol to relocate against was undefined. */
+ bfd_reloc_undefined,
+
+ /* The relocation was performed, but may not be ok - presently
+ generated only when linking i960 coff files with i960 b.out
+ symbols. If this type is returned, the error_message argument
+ to bfd_perform_relocation will be set. */
+ bfd_reloc_dangerous
+ }
+ bfd_reloc_status_type;
+
+
+typedef struct reloc_cache_entry
+{
+ /* A pointer into the canonical table of pointers. */
+ struct bfd_symbol **sym_ptr_ptr;
+
+ /* Offset in section. */
+ bfd_size_type address;
+
+ /* Addend for relocation value. */
+ bfd_vma addend;
+
+ /* Pointer to how to perform the required relocation. */
+ reloc_howto_type *howto;
+
+}
+arelent;
+
+enum complain_overflow
+{
+ /* Do not complain on overflow. */
+ complain_overflow_dont,
+
+ /* Complain if the bitfield overflows, whether it is considered
+ as signed or unsigned. */
+ complain_overflow_bitfield,
+
+ /* Complain if the value overflows when considered as signed
+ number. */
+ complain_overflow_signed,
+
+ /* Complain if the value overflows when considered as an
+ unsigned number. */
+ complain_overflow_unsigned
+};
+
+struct reloc_howto_struct
+{
+ /* The type field has mainly a documentary use - the back end can
+ do what it wants with it, though normally the back end's
+ external idea of a reloc number is stored
+ in this field. For example, a PC relative word relocation
+ in a coff environment has the type 023 - because that's
+ what the outside world calls a R_PCRWORD reloc. */
+ unsigned int type;
+
+ /* The value the final relocation is shifted right by. This drops
+ unwanted data from the relocation. */
+ unsigned int rightshift;
+
+ /* The size of the item to be relocated. This is *not* a
+ power-of-two measure. To get the number of bytes operated
+ on by a type of relocation, use bfd_get_reloc_size. */
+ int size;
+
+ /* The number of bits in the item to be relocated. This is used
+ when doing overflow checking. */
+ unsigned int bitsize;
+
+ /* Notes that the relocation is relative to the location in the
+ data section of the addend. The relocation function will
+ subtract from the relocation value the address of the location
+ being relocated. */
+ bfd_boolean pc_relative;
+
+ /* The bit position of the reloc value in the destination.
+ The relocated value is left shifted by this amount. */
+ unsigned int bitpos;
+
+ /* What type of overflow error should be checked for when
+ relocating. */
+ enum complain_overflow complain_on_overflow;
+
+ /* If this field is non null, then the supplied function is
+ called rather than the normal function. This allows really
+ strange relocation methods to be accommodated (e.g., i960 callj
+ instructions). */
+ bfd_reloc_status_type (*special_function)
+ (bfd *, arelent *, struct bfd_symbol *, void *, asection *,
+ bfd *, char **);
+
+ /* The textual name of the relocation type. */
+ char *name;
+
+ /* Some formats record a relocation addend in the section contents
+ rather than with the relocation. For ELF formats this is the
+ distinction between USE_REL and USE_RELA (though the code checks
+ for USE_REL == 1/0). The value of this field is TRUE if the
+ addend is recorded with the section contents; when performing a
+ partial link (ld -r) the section contents (the data) will be
+ modified. The value of this field is FALSE if addends are
+ recorded with the relocation (in arelent.addend); when performing
+ a partial link the relocation will be modified.
+ All relocations for all ELF USE_RELA targets should set this field
+ to FALSE (values of TRUE should be looked on with suspicion).
+ However, the converse is not true: not all relocations of all ELF
+ USE_REL targets set this field to TRUE. Why this is so is peculiar
+ to each particular target. For relocs that aren't used in partial
+ links (e.g. GOT stuff) it doesn't matter what this is set to. */
+ bfd_boolean partial_inplace;
+
+ /* src_mask selects the part of the instruction (or data) to be used
+ in the relocation sum. If the target relocations don't have an
+ addend in the reloc, e.g. ELF USE_REL, src_mask will normally equal
+ dst_mask to extract the addend from the section contents. If
+ relocations do have an addend in the reloc, e.g. ELF USE_RELA, this
+ field should be zero. Non-zero values for ELF USE_RELA targets are
+ bogus as in those cases the value in the dst_mask part of the
+ section contents should be treated as garbage. */
+ bfd_vma src_mask;
+
+ /* dst_mask selects which parts of the instruction (or data) are
+ replaced with a relocated value. */
+ bfd_vma dst_mask;
+
+ /* When some formats create PC relative instructions, they leave
+ the value of the pc of the place being relocated in the offset
+ slot of the instruction, so that a PC relative relocation can
+ be made just by adding in an ordinary offset (e.g., sun3 a.out).
+ Some formats leave the displacement part of an instruction
+ empty (e.g., m88k bcs); this flag signals the fact. */
+ bfd_boolean pcrel_offset;
+};
+
+#define HOWTO(C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC) \
+ { (unsigned) C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC }
+#define NEWHOWTO(FUNCTION, NAME, SIZE, REL, IN) \
+ HOWTO (0, 0, SIZE, 0, REL, 0, complain_overflow_dont, FUNCTION, \
+ NAME, FALSE, 0, 0, IN)
+
+#define EMPTY_HOWTO(C) \
+ HOWTO ((C), 0, 0, 0, FALSE, 0, complain_overflow_dont, NULL, \
+ NULL, FALSE, 0, 0, FALSE)
+
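+/* Illustrative sketch, not part of the upstream header: how a back
+   end typically instantiates a howto entry with the macro above,
+   here a plain 32-bit absolute relocation in the style documented
+   for ELF USE_RELA targets (src_mask zero, addend kept in the
+   reloc).  The type number 1 and the name are arbitrary.  */
+#if 0
+static reloc_howto_type example_abs32_howto =
+  HOWTO (1,                          /* type */
+         0,                          /* rightshift */
+         2,                          /* size (4 bytes in this encoding) */
+         32,                         /* bitsize */
+         FALSE,                      /* pc_relative */
+         0,                          /* bitpos */
+         complain_overflow_bitfield, /* complain_on_overflow */
+         NULL,                       /* special_function */
+         "EXAMPLE_ABS32",            /* name */
+         FALSE,                      /* partial_inplace */
+         0,                          /* src_mask */
+         0xffffffff,                 /* dst_mask */
+         FALSE);                     /* pcrel_offset */
+#endif
+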
+#define HOWTO_PREPARE(relocation, symbol) \
+ { \
+ if (symbol != NULL) \
+ { \
+ if (bfd_is_com_section (symbol->section)) \
+ { \
+ relocation = 0; \
+ } \
+ else \
+ { \
+ relocation = symbol->value; \
+ } \
+ } \
+ }
+
+unsigned int bfd_get_reloc_size (reloc_howto_type *);
+
+typedef struct relent_chain
+{
+ arelent relent;
+ struct relent_chain *next;
+}
+arelent_chain;
+
+bfd_reloc_status_type bfd_check_overflow
+ (enum complain_overflow how,
+ unsigned int bitsize,
+ unsigned int rightshift,
+ unsigned int addrsize,
+ bfd_vma relocation);
+
+bfd_reloc_status_type bfd_perform_relocation
+ (bfd *abfd,
+ arelent *reloc_entry,
+ void *data,
+ asection *input_section,
+ bfd *output_bfd,
+ char **error_message);
+
+bfd_reloc_status_type bfd_install_relocation
+ (bfd *abfd,
+ arelent *reloc_entry,
+ void *data, bfd_vma data_start,
+ asection *input_section,
+ char **error_message);
+
+enum bfd_reloc_code_real {
+ _dummy_first_bfd_reloc_code_real,
+
+
+/* Basic absolute relocations of N bits. */
+ BFD_RELOC_64,
+ BFD_RELOC_32,
+ BFD_RELOC_26,
+ BFD_RELOC_24,
+ BFD_RELOC_16,
+ BFD_RELOC_14,
+ BFD_RELOC_8,
+
+/* PC-relative relocations. Sometimes these are relative to the address
+of the relocation itself; sometimes they are relative to the start of
+the section containing the relocation. It depends on the specific target.
+
+The 24-bit relocation is used in some Intel 960 configurations. */
+ BFD_RELOC_64_PCREL,
+ BFD_RELOC_32_PCREL,
+ BFD_RELOC_24_PCREL,
+ BFD_RELOC_16_PCREL,
+ BFD_RELOC_12_PCREL,
+ BFD_RELOC_8_PCREL,
+
+/* Section relative relocations. Some targets need this for DWARF2. */
+ BFD_RELOC_32_SECREL,
+
+/* For ELF. */
+ BFD_RELOC_32_GOT_PCREL,
+ BFD_RELOC_16_GOT_PCREL,
+ BFD_RELOC_8_GOT_PCREL,
+ BFD_RELOC_32_GOTOFF,
+ BFD_RELOC_16_GOTOFF,
+ BFD_RELOC_LO16_GOTOFF,
+ BFD_RELOC_HI16_GOTOFF,
+ BFD_RELOC_HI16_S_GOTOFF,
+ BFD_RELOC_8_GOTOFF,
+ BFD_RELOC_64_PLT_PCREL,
+ BFD_RELOC_32_PLT_PCREL,
+ BFD_RELOC_24_PLT_PCREL,
+ BFD_RELOC_16_PLT_PCREL,
+ BFD_RELOC_8_PLT_PCREL,
+ BFD_RELOC_64_PLTOFF,
+ BFD_RELOC_32_PLTOFF,
+ BFD_RELOC_16_PLTOFF,
+ BFD_RELOC_LO16_PLTOFF,
+ BFD_RELOC_HI16_PLTOFF,
+ BFD_RELOC_HI16_S_PLTOFF,
+ BFD_RELOC_8_PLTOFF,
+
+/* Relocations used by 68K ELF. */
+ BFD_RELOC_68K_GLOB_DAT,
+ BFD_RELOC_68K_JMP_SLOT,
+ BFD_RELOC_68K_RELATIVE,
+
+/* Linkage-table relative. */
+ BFD_RELOC_32_BASEREL,
+ BFD_RELOC_16_BASEREL,
+ BFD_RELOC_LO16_BASEREL,
+ BFD_RELOC_HI16_BASEREL,
+ BFD_RELOC_HI16_S_BASEREL,
+ BFD_RELOC_8_BASEREL,
+ BFD_RELOC_RVA,
+
+/* Absolute 8-bit relocation, but used to form an address like 0xFFnn. */
+ BFD_RELOC_8_FFnn,
+
+/* These PC-relative relocations are stored as word displacements --
+i.e., byte displacements shifted right two bits. The 30-bit word
+displacement (<<32_PCREL_S2>> -- 32 bits, shifted 2) is used on the
+SPARC. (SPARC tools generally refer to this as <<WDISP30>>.) The
+signed 16-bit displacement is used on the MIPS, and the 23-bit
+displacement is used on the Alpha. */
+ BFD_RELOC_32_PCREL_S2,
+ BFD_RELOC_16_PCREL_S2,
+ BFD_RELOC_23_PCREL_S2,
+
+/* High 22 bits and low 10 bits of 32-bit value, placed into lower bits of
+the target word. These are used on the SPARC. */
+ BFD_RELOC_HI22,
+ BFD_RELOC_LO10,
+
+/* For systems that allocate a Global Pointer register, these are
+displacements off that register. These relocation types are
+handled specially, because the value the register will have is
+decided relatively late. */
+ BFD_RELOC_GPREL16,
+ BFD_RELOC_GPREL32,
+
+/* Reloc types used for i960/b.out. */
+ BFD_RELOC_I960_CALLJ,
+
+/* SPARC ELF relocations. There is probably some overlap with other
+relocation types already defined. */
+ BFD_RELOC_NONE,
+ BFD_RELOC_SPARC_WDISP22,
+ BFD_RELOC_SPARC22,
+ BFD_RELOC_SPARC13,
+ BFD_RELOC_SPARC_GOT10,
+ BFD_RELOC_SPARC_GOT13,
+ BFD_RELOC_SPARC_GOT22,
+ BFD_RELOC_SPARC_PC10,
+ BFD_RELOC_SPARC_PC22,
+ BFD_RELOC_SPARC_WPLT30,
+ BFD_RELOC_SPARC_COPY,
+ BFD_RELOC_SPARC_GLOB_DAT,
+ BFD_RELOC_SPARC_JMP_SLOT,
+ BFD_RELOC_SPARC_RELATIVE,
+ BFD_RELOC_SPARC_UA16,
+ BFD_RELOC_SPARC_UA32,
+ BFD_RELOC_SPARC_UA64,
+
+/* I think these are specific to SPARC a.out (e.g., Sun 4). */
+ BFD_RELOC_SPARC_BASE13,
+ BFD_RELOC_SPARC_BASE22,
+
+/* SPARC64 relocations */
+#define BFD_RELOC_SPARC_64 BFD_RELOC_64
+ BFD_RELOC_SPARC_10,
+ BFD_RELOC_SPARC_11,
+ BFD_RELOC_SPARC_OLO10,
+ BFD_RELOC_SPARC_HH22,
+ BFD_RELOC_SPARC_HM10,
+ BFD_RELOC_SPARC_LM22,
+ BFD_RELOC_SPARC_PC_HH22,
+ BFD_RELOC_SPARC_PC_HM10,
+ BFD_RELOC_SPARC_PC_LM22,
+ BFD_RELOC_SPARC_WDISP16,
+ BFD_RELOC_SPARC_WDISP19,
+ BFD_RELOC_SPARC_7,
+ BFD_RELOC_SPARC_6,
+ BFD_RELOC_SPARC_5,
+#define BFD_RELOC_SPARC_DISP64 BFD_RELOC_64_PCREL
+ BFD_RELOC_SPARC_PLT32,
+ BFD_RELOC_SPARC_PLT64,
+ BFD_RELOC_SPARC_HIX22,
+ BFD_RELOC_SPARC_LOX10,
+ BFD_RELOC_SPARC_H44,
+ BFD_RELOC_SPARC_M44,
+ BFD_RELOC_SPARC_L44,
+ BFD_RELOC_SPARC_REGISTER,
+
+/* SPARC little endian relocation */
+ BFD_RELOC_SPARC_REV32,
+
+/* SPARC TLS relocations */
+ BFD_RELOC_SPARC_TLS_GD_HI22,
+ BFD_RELOC_SPARC_TLS_GD_LO10,
+ BFD_RELOC_SPARC_TLS_GD_ADD,
+ BFD_RELOC_SPARC_TLS_GD_CALL,
+ BFD_RELOC_SPARC_TLS_LDM_HI22,
+ BFD_RELOC_SPARC_TLS_LDM_LO10,
+ BFD_RELOC_SPARC_TLS_LDM_ADD,
+ BFD_RELOC_SPARC_TLS_LDM_CALL,
+ BFD_RELOC_SPARC_TLS_LDO_HIX22,
+ BFD_RELOC_SPARC_TLS_LDO_LOX10,
+ BFD_RELOC_SPARC_TLS_LDO_ADD,
+ BFD_RELOC_SPARC_TLS_IE_HI22,
+ BFD_RELOC_SPARC_TLS_IE_LO10,
+ BFD_RELOC_SPARC_TLS_IE_LD,
+ BFD_RELOC_SPARC_TLS_IE_LDX,
+ BFD_RELOC_SPARC_TLS_IE_ADD,
+ BFD_RELOC_SPARC_TLS_LE_HIX22,
+ BFD_RELOC_SPARC_TLS_LE_LOX10,
+ BFD_RELOC_SPARC_TLS_DTPMOD32,
+ BFD_RELOC_SPARC_TLS_DTPMOD64,
+ BFD_RELOC_SPARC_TLS_DTPOFF32,
+ BFD_RELOC_SPARC_TLS_DTPOFF64,
+ BFD_RELOC_SPARC_TLS_TPOFF32,
+ BFD_RELOC_SPARC_TLS_TPOFF64,
+
+/* Alpha ECOFF and ELF relocations. Some of these treat the symbol or
+"addend" in some special way.
+For GPDISP_HI16 ("gpdisp") relocations, the symbol is ignored when
+writing; when reading, it will be the absolute section symbol. The
+addend is the displacement in bytes of the "lda" instruction from
+the "ldah" instruction (which is at the address of this reloc). */
+ BFD_RELOC_ALPHA_GPDISP_HI16,
+
+/* For GPDISP_LO16 ("ignore") relocations, the symbol is handled as
+with GPDISP_HI16 relocs. The addend is ignored when writing the
+relocations out, and is filled in with the file's GP value on
+reading, for convenience. */
+ BFD_RELOC_ALPHA_GPDISP_LO16,
+
+/* The ELF GPDISP relocation is exactly the same as the GPDISP_HI16
+relocation except that there is no accompanying GPDISP_LO16
+relocation. */
+ BFD_RELOC_ALPHA_GPDISP,
+
+/* The Alpha LITERAL/LITUSE relocs are produced by a symbol reference;
+the assembler turns it into a LDQ instruction to load the address of
+the symbol, and then fills in a register in the real instruction.
+
+The LITERAL reloc, at the LDQ instruction, refers to the .lita
+section symbol. The addend is ignored when writing, but is filled
+in with the file's GP value on reading, for convenience, as with the
+GPDISP_LO16 reloc.
+
+The ELF_LITERAL reloc is somewhere between 16_GOTOFF and GPDISP_LO16.
+It should refer to the symbol to be referenced, as with 16_GOTOFF,
+but it generates output not based on the position within the .got
+section, but relative to the GP value chosen for the file during the
+final link stage.
+
+The LITUSE reloc, on the instruction using the loaded address, gives
+information to the linker that it might be able to use to optimize
+away some literal section references. The symbol is ignored (read
+as the absolute section symbol), and the "addend" indicates the type
+of instruction using the register:
+1 - "memory" fmt insn
+2 - byte-manipulation (byte offset reg)
+3 - jsr (target of branch) */
+ BFD_RELOC_ALPHA_LITERAL,
+ BFD_RELOC_ALPHA_ELF_LITERAL,
+ BFD_RELOC_ALPHA_LITUSE,
+
+/* The HINT relocation indicates a value that should be filled into the
+"hint" field of a jmp/jsr/ret instruction, for possible branch-
+prediction logic which may be provided on some processors. */
+ BFD_RELOC_ALPHA_HINT,
+
+/* The LINKAGE relocation outputs a linkage pair in the object file,
+which is filled by the linker. */
+ BFD_RELOC_ALPHA_LINKAGE,
+
+/* The CODEADDR relocation outputs a STO_CA in the object file,
+which is filled by the linker. */
+ BFD_RELOC_ALPHA_CODEADDR,
+
+/* The GPREL_HI/LO relocations together form a 32-bit offset from the
+GP register. */
+ BFD_RELOC_ALPHA_GPREL_HI16,
+ BFD_RELOC_ALPHA_GPREL_LO16,
+
+/* Like BFD_RELOC_23_PCREL_S2, except that the source and target must
+share a common GP, and the target address is adjusted for
+STO_ALPHA_STD_GPLOAD. */
+ BFD_RELOC_ALPHA_BRSGP,
+
+/* Alpha thread-local storage relocations. */
+ BFD_RELOC_ALPHA_TLSGD,
+ BFD_RELOC_ALPHA_TLSLDM,
+ BFD_RELOC_ALPHA_DTPMOD64,
+ BFD_RELOC_ALPHA_GOTDTPREL16,
+ BFD_RELOC_ALPHA_DTPREL64,
+ BFD_RELOC_ALPHA_DTPREL_HI16,
+ BFD_RELOC_ALPHA_DTPREL_LO16,
+ BFD_RELOC_ALPHA_DTPREL16,
+ BFD_RELOC_ALPHA_GOTTPREL16,
+ BFD_RELOC_ALPHA_TPREL64,
+ BFD_RELOC_ALPHA_TPREL_HI16,
+ BFD_RELOC_ALPHA_TPREL_LO16,
+ BFD_RELOC_ALPHA_TPREL16,
+
+/* Bits 27..2 of the relocation address shifted right 2 bits;
+simple reloc otherwise. */
+ BFD_RELOC_MIPS_JMP,
+
+/* The MIPS16 jump instruction. */
+ BFD_RELOC_MIPS16_JMP,
+
+/* MIPS16 GP relative reloc. */
+ BFD_RELOC_MIPS16_GPREL,
+
+/* High 16 bits of 32-bit value; simple reloc. */
+ BFD_RELOC_HI16,
+
+/* High 16 bits of 32-bit value but the low 16 bits will be sign
+extended and added to form the final result. If the low 16
+bits form a negative number, we need to add one to the high value
+to compensate for the borrow when the low bits are added. */
+ BFD_RELOC_HI16_S,
+
+/* Low 16 bits. */
+ BFD_RELOC_LO16,
+
+/* High 16 bits of 32-bit pc-relative value */
+ BFD_RELOC_HI16_PCREL,
+
+/* High 16 bits of 32-bit pc-relative value, adjusted */
+ BFD_RELOC_HI16_S_PCREL,
+
+/* Low 16 bits of pc-relative value */
+ BFD_RELOC_LO16_PCREL,
+
+/* MIPS16 high 16 bits of 32-bit value. */
+ BFD_RELOC_MIPS16_HI16,
+
+/* MIPS16 high 16 bits of 32-bit value but the low 16 bits will be sign
+extended and added to form the final result. If the low 16
+bits form a negative number, we need to add one to the high value
+to compensate for the borrow when the low bits are added. */
+ BFD_RELOC_MIPS16_HI16_S,
+
+/* MIPS16 low 16 bits. */
+ BFD_RELOC_MIPS16_LO16,
+
+/* Relocation against a MIPS literal section. */
+ BFD_RELOC_MIPS_LITERAL,
+
+/* MIPS ELF relocations. */
+ BFD_RELOC_MIPS_GOT16,
+ BFD_RELOC_MIPS_CALL16,
+ BFD_RELOC_MIPS_GOT_HI16,
+ BFD_RELOC_MIPS_GOT_LO16,
+ BFD_RELOC_MIPS_CALL_HI16,
+ BFD_RELOC_MIPS_CALL_LO16,
+ BFD_RELOC_MIPS_SUB,
+ BFD_RELOC_MIPS_GOT_PAGE,
+ BFD_RELOC_MIPS_GOT_OFST,
+ BFD_RELOC_MIPS_GOT_DISP,
+ BFD_RELOC_MIPS_SHIFT5,
+ BFD_RELOC_MIPS_SHIFT6,
+ BFD_RELOC_MIPS_INSERT_A,
+ BFD_RELOC_MIPS_INSERT_B,
+ BFD_RELOC_MIPS_DELETE,
+ BFD_RELOC_MIPS_HIGHEST,
+ BFD_RELOC_MIPS_HIGHER,
+ BFD_RELOC_MIPS_SCN_DISP,
+ BFD_RELOC_MIPS_REL16,
+ BFD_RELOC_MIPS_RELGOT,
+ BFD_RELOC_MIPS_JALR,
+ BFD_RELOC_MIPS_TLS_DTPMOD32,
+ BFD_RELOC_MIPS_TLS_DTPREL32,
+ BFD_RELOC_MIPS_TLS_DTPMOD64,
+ BFD_RELOC_MIPS_TLS_DTPREL64,
+ BFD_RELOC_MIPS_TLS_GD,
+ BFD_RELOC_MIPS_TLS_LDM,
+ BFD_RELOC_MIPS_TLS_DTPREL_HI16,
+ BFD_RELOC_MIPS_TLS_DTPREL_LO16,
+ BFD_RELOC_MIPS_TLS_GOTTPREL,
+ BFD_RELOC_MIPS_TLS_TPREL32,
+ BFD_RELOC_MIPS_TLS_TPREL64,
+ BFD_RELOC_MIPS_TLS_TPREL_HI16,
+ BFD_RELOC_MIPS_TLS_TPREL_LO16,
+
+
+/* Fujitsu Frv Relocations. */
+ BFD_RELOC_FRV_LABEL16,
+ BFD_RELOC_FRV_LABEL24,
+ BFD_RELOC_FRV_LO16,
+ BFD_RELOC_FRV_HI16,
+ BFD_RELOC_FRV_GPREL12,
+ BFD_RELOC_FRV_GPRELU12,
+ BFD_RELOC_FRV_GPREL32,
+ BFD_RELOC_FRV_GPRELHI,
+ BFD_RELOC_FRV_GPRELLO,
+ BFD_RELOC_FRV_GOT12,
+ BFD_RELOC_FRV_GOTHI,
+ BFD_RELOC_FRV_GOTLO,
+ BFD_RELOC_FRV_FUNCDESC,
+ BFD_RELOC_FRV_FUNCDESC_GOT12,
+ BFD_RELOC_FRV_FUNCDESC_GOTHI,
+ BFD_RELOC_FRV_FUNCDESC_GOTLO,
+ BFD_RELOC_FRV_FUNCDESC_VALUE,
+ BFD_RELOC_FRV_FUNCDESC_GOTOFF12,
+ BFD_RELOC_FRV_FUNCDESC_GOTOFFHI,
+ BFD_RELOC_FRV_FUNCDESC_GOTOFFLO,
+ BFD_RELOC_FRV_GOTOFF12,
+ BFD_RELOC_FRV_GOTOFFHI,
+ BFD_RELOC_FRV_GOTOFFLO,
+ BFD_RELOC_FRV_GETTLSOFF,
+ BFD_RELOC_FRV_TLSDESC_VALUE,
+ BFD_RELOC_FRV_GOTTLSDESC12,
+ BFD_RELOC_FRV_GOTTLSDESCHI,
+ BFD_RELOC_FRV_GOTTLSDESCLO,
+ BFD_RELOC_FRV_TLSMOFF12,
+ BFD_RELOC_FRV_TLSMOFFHI,
+ BFD_RELOC_FRV_TLSMOFFLO,
+ BFD_RELOC_FRV_GOTTLSOFF12,
+ BFD_RELOC_FRV_GOTTLSOFFHI,
+ BFD_RELOC_FRV_GOTTLSOFFLO,
+ BFD_RELOC_FRV_TLSOFF,
+ BFD_RELOC_FRV_TLSDESC_RELAX,
+ BFD_RELOC_FRV_GETTLSOFF_RELAX,
+ BFD_RELOC_FRV_TLSOFF_RELAX,
+ BFD_RELOC_FRV_TLSMOFF,
+
+
+/* This is a 24-bit GOT-relative reloc for the mn10300. */
+ BFD_RELOC_MN10300_GOTOFF24,
+
+/* This is a 32-bit GOT-relative reloc for the mn10300, offset by two bytes
+in the instruction. */
+ BFD_RELOC_MN10300_GOT32,
+
+/* This is a 24-bit GOT-relative reloc for the mn10300, offset by two bytes
+in the instruction. */
+ BFD_RELOC_MN10300_GOT24,
+
+/* This is a 16-bit GOT-relative reloc for the mn10300, offset by two bytes
+in the instruction. */
+ BFD_RELOC_MN10300_GOT16,
+
+/* Copy symbol at runtime. */
+ BFD_RELOC_MN10300_COPY,
+
+/* Create GOT entry. */
+ BFD_RELOC_MN10300_GLOB_DAT,
+
+/* Create PLT entry. */
+ BFD_RELOC_MN10300_JMP_SLOT,
+
+/* Adjust by program base. */
+ BFD_RELOC_MN10300_RELATIVE,
+
+
+/* i386/elf relocations */
+ BFD_RELOC_386_GOT32,
+ BFD_RELOC_386_PLT32,
+ BFD_RELOC_386_COPY,
+ BFD_RELOC_386_GLOB_DAT,
+ BFD_RELOC_386_JUMP_SLOT,
+ BFD_RELOC_386_RELATIVE,
+ BFD_RELOC_386_GOTOFF,
+ BFD_RELOC_386_GOTPC,
+ BFD_RELOC_386_TLS_TPOFF,
+ BFD_RELOC_386_TLS_IE,
+ BFD_RELOC_386_TLS_GOTIE,
+ BFD_RELOC_386_TLS_LE,
+ BFD_RELOC_386_TLS_GD,
+ BFD_RELOC_386_TLS_LDM,
+ BFD_RELOC_386_TLS_LDO_32,
+ BFD_RELOC_386_TLS_IE_32,
+ BFD_RELOC_386_TLS_LE_32,
+ BFD_RELOC_386_TLS_DTPMOD32,
+ BFD_RELOC_386_TLS_DTPOFF32,
+ BFD_RELOC_386_TLS_TPOFF32,
+
+/* x86-64/elf relocations */
+ BFD_RELOC_X86_64_GOT32,
+ BFD_RELOC_X86_64_PLT32,
+ BFD_RELOC_X86_64_COPY,
+ BFD_RELOC_X86_64_GLOB_DAT,
+ BFD_RELOC_X86_64_JUMP_SLOT,
+ BFD_RELOC_X86_64_RELATIVE,
+ BFD_RELOC_X86_64_GOTPCREL,
+ BFD_RELOC_X86_64_32S,
+ BFD_RELOC_X86_64_DTPMOD64,
+ BFD_RELOC_X86_64_DTPOFF64,
+ BFD_RELOC_X86_64_TPOFF64,
+ BFD_RELOC_X86_64_TLSGD,
+ BFD_RELOC_X86_64_TLSLD,
+ BFD_RELOC_X86_64_DTPOFF32,
+ BFD_RELOC_X86_64_GOTTPOFF,
+ BFD_RELOC_X86_64_TPOFF32,
+ BFD_RELOC_X86_64_GOTOFF64,
+ BFD_RELOC_X86_64_GOTPC32,
+
+/* ns32k relocations */
+ BFD_RELOC_NS32K_IMM_8,
+ BFD_RELOC_NS32K_IMM_16,
+ BFD_RELOC_NS32K_IMM_32,
+ BFD_RELOC_NS32K_IMM_8_PCREL,
+ BFD_RELOC_NS32K_IMM_16_PCREL,
+ BFD_RELOC_NS32K_IMM_32_PCREL,
+ BFD_RELOC_NS32K_DISP_8,
+ BFD_RELOC_NS32K_DISP_16,
+ BFD_RELOC_NS32K_DISP_32,
+ BFD_RELOC_NS32K_DISP_8_PCREL,
+ BFD_RELOC_NS32K_DISP_16_PCREL,
+ BFD_RELOC_NS32K_DISP_32_PCREL,
+
+/* PDP11 relocations */
+ BFD_RELOC_PDP11_DISP_8_PCREL,
+ BFD_RELOC_PDP11_DISP_6_PCREL,
+
+/* Picojava relocs. Not all of these appear in object files. */
+ BFD_RELOC_PJ_CODE_HI16,
+ BFD_RELOC_PJ_CODE_LO16,
+ BFD_RELOC_PJ_CODE_DIR16,
+ BFD_RELOC_PJ_CODE_DIR32,
+ BFD_RELOC_PJ_CODE_REL16,
+ BFD_RELOC_PJ_CODE_REL32,
+
+/* Power(rs6000) and PowerPC relocations. */
+ BFD_RELOC_PPC_B26,
+ BFD_RELOC_PPC_BA26,
+ BFD_RELOC_PPC_TOC16,
+ BFD_RELOC_PPC_B16,
+ BFD_RELOC_PPC_B16_BRTAKEN,
+ BFD_RELOC_PPC_B16_BRNTAKEN,
+ BFD_RELOC_PPC_BA16,
+ BFD_RELOC_PPC_BA16_BRTAKEN,
+ BFD_RELOC_PPC_BA16_BRNTAKEN,
+ BFD_RELOC_PPC_COPY,
+ BFD_RELOC_PPC_GLOB_DAT,
+ BFD_RELOC_PPC_JMP_SLOT,
+ BFD_RELOC_PPC_RELATIVE,
+ BFD_RELOC_PPC_LOCAL24PC,
+ BFD_RELOC_PPC_EMB_NADDR32,
+ BFD_RELOC_PPC_EMB_NADDR16,
+ BFD_RELOC_PPC_EMB_NADDR16_LO,
+ BFD_RELOC_PPC_EMB_NADDR16_HI,
+ BFD_RELOC_PPC_EMB_NADDR16_HA,
+ BFD_RELOC_PPC_EMB_SDAI16,
+ BFD_RELOC_PPC_EMB_SDA2I16,
+ BFD_RELOC_PPC_EMB_SDA2REL,
+ BFD_RELOC_PPC_EMB_SDA21,
+ BFD_RELOC_PPC_EMB_MRKREF,
+ BFD_RELOC_PPC_EMB_RELSEC16,
+ BFD_RELOC_PPC_EMB_RELST_LO,
+ BFD_RELOC_PPC_EMB_RELST_HI,
+ BFD_RELOC_PPC_EMB_RELST_HA,
+ BFD_RELOC_PPC_EMB_BIT_FLD,
+ BFD_RELOC_PPC_EMB_RELSDA,
+ BFD_RELOC_PPC64_HIGHER,
+ BFD_RELOC_PPC64_HIGHER_S,
+ BFD_RELOC_PPC64_HIGHEST,
+ BFD_RELOC_PPC64_HIGHEST_S,
+ BFD_RELOC_PPC64_TOC16_LO,
+ BFD_RELOC_PPC64_TOC16_HI,
+ BFD_RELOC_PPC64_TOC16_HA,
+ BFD_RELOC_PPC64_TOC,
+ BFD_RELOC_PPC64_PLTGOT16,
+ BFD_RELOC_PPC64_PLTGOT16_LO,
+ BFD_RELOC_PPC64_PLTGOT16_HI,
+ BFD_RELOC_PPC64_PLTGOT16_HA,
+ BFD_RELOC_PPC64_ADDR16_DS,
+ BFD_RELOC_PPC64_ADDR16_LO_DS,
+ BFD_RELOC_PPC64_GOT16_DS,
+ BFD_RELOC_PPC64_GOT16_LO_DS,
+ BFD_RELOC_PPC64_PLT16_LO_DS,
+ BFD_RELOC_PPC64_SECTOFF_DS,
+ BFD_RELOC_PPC64_SECTOFF_LO_DS,
+ BFD_RELOC_PPC64_TOC16_DS,
+ BFD_RELOC_PPC64_TOC16_LO_DS,
+ BFD_RELOC_PPC64_PLTGOT16_DS,
+ BFD_RELOC_PPC64_PLTGOT16_LO_DS,
+
+/* PowerPC and PowerPC64 thread-local storage relocations. */
+ BFD_RELOC_PPC_TLS,
+ BFD_RELOC_PPC_DTPMOD,
+ BFD_RELOC_PPC_TPREL16,
+ BFD_RELOC_PPC_TPREL16_LO,
+ BFD_RELOC_PPC_TPREL16_HI,
+ BFD_RELOC_PPC_TPREL16_HA,
+ BFD_RELOC_PPC_TPREL,
+ BFD_RELOC_PPC_DTPREL16,
+ BFD_RELOC_PPC_DTPREL16_LO,
+ BFD_RELOC_PPC_DTPREL16_HI,
+ BFD_RELOC_PPC_DTPREL16_HA,
+ BFD_RELOC_PPC_DTPREL,
+ BFD_RELOC_PPC_GOT_TLSGD16,
+ BFD_RELOC_PPC_GOT_TLSGD16_LO,
+ BFD_RELOC_PPC_GOT_TLSGD16_HI,
+ BFD_RELOC_PPC_GOT_TLSGD16_HA,
+ BFD_RELOC_PPC_GOT_TLSLD16,
+ BFD_RELOC_PPC_GOT_TLSLD16_LO,
+ BFD_RELOC_PPC_GOT_TLSLD16_HI,
+ BFD_RELOC_PPC_GOT_TLSLD16_HA,
+ BFD_RELOC_PPC_GOT_TPREL16,
+ BFD_RELOC_PPC_GOT_TPREL16_LO,
+ BFD_RELOC_PPC_GOT_TPREL16_HI,
+ BFD_RELOC_PPC_GOT_TPREL16_HA,
+ BFD_RELOC_PPC_GOT_DTPREL16,
+ BFD_RELOC_PPC_GOT_DTPREL16_LO,
+ BFD_RELOC_PPC_GOT_DTPREL16_HI,
+ BFD_RELOC_PPC_GOT_DTPREL16_HA,
+ BFD_RELOC_PPC64_TPREL16_DS,
+ BFD_RELOC_PPC64_TPREL16_LO_DS,
+ BFD_RELOC_PPC64_TPREL16_HIGHER,
+ BFD_RELOC_PPC64_TPREL16_HIGHERA,
+ BFD_RELOC_PPC64_TPREL16_HIGHEST,
+ BFD_RELOC_PPC64_TPREL16_HIGHESTA,
+ BFD_RELOC_PPC64_DTPREL16_DS,
+ BFD_RELOC_PPC64_DTPREL16_LO_DS,
+ BFD_RELOC_PPC64_DTPREL16_HIGHER,
+ BFD_RELOC_PPC64_DTPREL16_HIGHERA,
+ BFD_RELOC_PPC64_DTPREL16_HIGHEST,
+ BFD_RELOC_PPC64_DTPREL16_HIGHESTA,
+
+/* IBM 370/390 relocations */
+ BFD_RELOC_I370_D12,
+
+/* The type of reloc used to build a constructor table - at the moment
+probably a 32 bit wide absolute relocation, but the target can choose.
+It generally does map to one of the other relocation types. */
+ BFD_RELOC_CTOR,
+
+/* ARM 26 bit pc-relative branch. The lowest two bits must be zero and are
+not stored in the instruction. */
+ BFD_RELOC_ARM_PCREL_BRANCH,
+
+/* ARM 26 bit pc-relative branch. The lowest bit must be zero and is
+not stored in the instruction. The 2nd lowest bit comes from a 1 bit
+field in the instruction. */
+ BFD_RELOC_ARM_PCREL_BLX,
+
+/* Thumb 22 bit pc-relative branch. The lowest bit must be zero and is
+not stored in the instruction. The 2nd lowest bit comes from a 1 bit
+field in the instruction. */
+ BFD_RELOC_THUMB_PCREL_BLX,
+
+/* Thumb 7-, 9-, 12-, 20-, 23-, and 25-bit pc-relative branches.
+The lowest bit must be zero and is not stored in the instruction.
+Note that the corresponding ELF R_ARM_THM_JUMPnn constant has an
+"nn" one smaller in all cases. Note further that BRANCH23
+corresponds to R_ARM_THM_CALL. */
+ BFD_RELOC_THUMB_PCREL_BRANCH7,
+ BFD_RELOC_THUMB_PCREL_BRANCH9,
+ BFD_RELOC_THUMB_PCREL_BRANCH12,
+ BFD_RELOC_THUMB_PCREL_BRANCH20,
+ BFD_RELOC_THUMB_PCREL_BRANCH23,
+ BFD_RELOC_THUMB_PCREL_BRANCH25,
+
+/* 12-bit immediate offset, used in ARM-format ldr and str instructions. */
+ BFD_RELOC_ARM_OFFSET_IMM,
+
+/* 5-bit immediate offset, used in Thumb-format ldr and str instructions. */
+ BFD_RELOC_ARM_THUMB_OFFSET,
+
+/* PC-relative or absolute relocation depending on target. Used for
+entries in .init_array sections. */
+ BFD_RELOC_ARM_TARGET1,
+
+/* Read-only segment base relative address. */
+ BFD_RELOC_ARM_ROSEGREL32,
+
+/* Data segment base relative address. */
+ BFD_RELOC_ARM_SBREL32,
+
+/* This reloc is used for references to RTTI data from exception handling
+tables. The actual definition depends on the target. It may be a
+pc-relative or some form of GOT-indirect relocation. */
+ BFD_RELOC_ARM_TARGET2,
+
+/* 31-bit PC relative address. */
+ BFD_RELOC_ARM_PREL31,
+
+/* Relocations for setting up GOTs and PLTs for shared libraries. */
+ BFD_RELOC_ARM_JUMP_SLOT,
+ BFD_RELOC_ARM_GLOB_DAT,
+ BFD_RELOC_ARM_GOT32,
+ BFD_RELOC_ARM_PLT32,
+ BFD_RELOC_ARM_RELATIVE,
+ BFD_RELOC_ARM_GOTOFF,
+ BFD_RELOC_ARM_GOTPC,
+
+/* ARM thread-local storage relocations. */
+ BFD_RELOC_ARM_TLS_GD32,
+ BFD_RELOC_ARM_TLS_LDO32,
+ BFD_RELOC_ARM_TLS_LDM32,
+ BFD_RELOC_ARM_TLS_DTPOFF32,
+ BFD_RELOC_ARM_TLS_DTPMOD32,
+ BFD_RELOC_ARM_TLS_TPOFF32,
+ BFD_RELOC_ARM_TLS_IE32,
+ BFD_RELOC_ARM_TLS_LE32,
+
+/* These relocs are only used within the ARM assembler. They are not
+(at present) written to any object files. */
+ BFD_RELOC_ARM_IMMEDIATE,
+ BFD_RELOC_ARM_ADRL_IMMEDIATE,
+ BFD_RELOC_ARM_T32_IMMEDIATE,
+ BFD_RELOC_ARM_SHIFT_IMM,
+ BFD_RELOC_ARM_SMI,
+ BFD_RELOC_ARM_SWI,
+ BFD_RELOC_ARM_MULTI,
+ BFD_RELOC_ARM_CP_OFF_IMM,
+ BFD_RELOC_ARM_CP_OFF_IMM_S2,
+ BFD_RELOC_ARM_ADR_IMM,
+ BFD_RELOC_ARM_LDR_IMM,
+ BFD_RELOC_ARM_LITERAL,
+ BFD_RELOC_ARM_IN_POOL,
+ BFD_RELOC_ARM_OFFSET_IMM8,
+ BFD_RELOC_ARM_T32_OFFSET_U8,
+ BFD_RELOC_ARM_T32_OFFSET_IMM,
+ BFD_RELOC_ARM_HWLITERAL,
+ BFD_RELOC_ARM_THUMB_ADD,
+ BFD_RELOC_ARM_THUMB_IMM,
+ BFD_RELOC_ARM_THUMB_SHIFT,
+
+/* Renesas / SuperH SH relocs. Not all of these appear in object files. */
+ BFD_RELOC_SH_PCDISP8BY2,
+ BFD_RELOC_SH_PCDISP12BY2,
+ BFD_RELOC_SH_IMM3,
+ BFD_RELOC_SH_IMM3U,
+ BFD_RELOC_SH_DISP12,
+ BFD_RELOC_SH_DISP12BY2,
+ BFD_RELOC_SH_DISP12BY4,
+ BFD_RELOC_SH_DISP12BY8,
+ BFD_RELOC_SH_DISP20,
+ BFD_RELOC_SH_DISP20BY8,
+ BFD_RELOC_SH_IMM4,
+ BFD_RELOC_SH_IMM4BY2,
+ BFD_RELOC_SH_IMM4BY4,
+ BFD_RELOC_SH_IMM8,
+ BFD_RELOC_SH_IMM8BY2,
+ BFD_RELOC_SH_IMM8BY4,
+ BFD_RELOC_SH_PCRELIMM8BY2,
+ BFD_RELOC_SH_PCRELIMM8BY4,
+ BFD_RELOC_SH_SWITCH16,
+ BFD_RELOC_SH_SWITCH32,
+ BFD_RELOC_SH_USES,
+ BFD_RELOC_SH_COUNT,
+ BFD_RELOC_SH_ALIGN,
+ BFD_RELOC_SH_CODE,
+ BFD_RELOC_SH_DATA,
+ BFD_RELOC_SH_LABEL,
+ BFD_RELOC_SH_LOOP_START,
+ BFD_RELOC_SH_LOOP_END,
+ BFD_RELOC_SH_COPY,
+ BFD_RELOC_SH_GLOB_DAT,
+ BFD_RELOC_SH_JMP_SLOT,
+ BFD_RELOC_SH_RELATIVE,
+ BFD_RELOC_SH_GOTPC,
+ BFD_RELOC_SH_GOT_LOW16,
+ BFD_RELOC_SH_GOT_MEDLOW16,
+ BFD_RELOC_SH_GOT_MEDHI16,
+ BFD_RELOC_SH_GOT_HI16,
+ BFD_RELOC_SH_GOTPLT_LOW16,
+ BFD_RELOC_SH_GOTPLT_MEDLOW16,
+ BFD_RELOC_SH_GOTPLT_MEDHI16,
+ BFD_RELOC_SH_GOTPLT_HI16,
+ BFD_RELOC_SH_PLT_LOW16,
+ BFD_RELOC_SH_PLT_MEDLOW16,
+ BFD_RELOC_SH_PLT_MEDHI16,
+ BFD_RELOC_SH_PLT_HI16,
+ BFD_RELOC_SH_GOTOFF_LOW16,
+ BFD_RELOC_SH_GOTOFF_MEDLOW16,
+ BFD_RELOC_SH_GOTOFF_MEDHI16,
+ BFD_RELOC_SH_GOTOFF_HI16,
+ BFD_RELOC_SH_GOTPC_LOW16,
+ BFD_RELOC_SH_GOTPC_MEDLOW16,
+ BFD_RELOC_SH_GOTPC_MEDHI16,
+ BFD_RELOC_SH_GOTPC_HI16,
+ BFD_RELOC_SH_COPY64,
+ BFD_RELOC_SH_GLOB_DAT64,
+ BFD_RELOC_SH_JMP_SLOT64,
+ BFD_RELOC_SH_RELATIVE64,
+ BFD_RELOC_SH_GOT10BY4,
+ BFD_RELOC_SH_GOT10BY8,
+ BFD_RELOC_SH_GOTPLT10BY4,
+ BFD_RELOC_SH_GOTPLT10BY8,
+ BFD_RELOC_SH_GOTPLT32,
+ BFD_RELOC_SH_SHMEDIA_CODE,
+ BFD_RELOC_SH_IMMU5,
+ BFD_RELOC_SH_IMMS6,
+ BFD_RELOC_SH_IMMS6BY32,
+ BFD_RELOC_SH_IMMU6,
+ BFD_RELOC_SH_IMMS10,
+ BFD_RELOC_SH_IMMS10BY2,
+ BFD_RELOC_SH_IMMS10BY4,
+ BFD_RELOC_SH_IMMS10BY8,
+ BFD_RELOC_SH_IMMS16,
+ BFD_RELOC_SH_IMMU16,
+ BFD_RELOC_SH_IMM_LOW16,
+ BFD_RELOC_SH_IMM_LOW16_PCREL,
+ BFD_RELOC_SH_IMM_MEDLOW16,
+ BFD_RELOC_SH_IMM_MEDLOW16_PCREL,
+ BFD_RELOC_SH_IMM_MEDHI16,
+ BFD_RELOC_SH_IMM_MEDHI16_PCREL,
+ BFD_RELOC_SH_IMM_HI16,
+ BFD_RELOC_SH_IMM_HI16_PCREL,
+ BFD_RELOC_SH_PT_16,
+ BFD_RELOC_SH_TLS_GD_32,
+ BFD_RELOC_SH_TLS_LD_32,
+ BFD_RELOC_SH_TLS_LDO_32,
+ BFD_RELOC_SH_TLS_IE_32,
+ BFD_RELOC_SH_TLS_LE_32,
+ BFD_RELOC_SH_TLS_DTPMOD32,
+ BFD_RELOC_SH_TLS_DTPOFF32,
+ BFD_RELOC_SH_TLS_TPOFF32,
+
+/* ARC Cores relocs.
+ARC 22 bit pc-relative branch. The lowest two bits must be zero and are
+not stored in the instruction. The high 20 bits are installed in bits 26
+through 7 of the instruction. */
+ BFD_RELOC_ARC_B22_PCREL,
+
+/* ARC 26 bit absolute branch. The lowest two bits must be zero and are not
+stored in the instruction. The high 24 bits are installed in bits 23
+through 0. */
+ BFD_RELOC_ARC_B26,
+
+/* Mitsubishi D10V relocs.
+This is a 10-bit reloc with the right 2 bits
+assumed to be 0. */
+ BFD_RELOC_D10V_10_PCREL_R,
+
+/* Mitsubishi D10V relocs.
+This is a 10-bit reloc with the right 2 bits
+assumed to be 0. This is the same as the previous reloc
+except it is in the left container, i.e.,
+shifted left 15 bits. */
+ BFD_RELOC_D10V_10_PCREL_L,
+
+/* This is an 18-bit reloc with the right 2 bits
+assumed to be 0. */
+ BFD_RELOC_D10V_18,
+
+/* This is an 18-bit reloc with the right 2 bits
+assumed to be 0. */
+ BFD_RELOC_D10V_18_PCREL,
+
+/* Mitsubishi D30V relocs.
+This is a 6-bit absolute reloc. */
+ BFD_RELOC_D30V_6,
+
+/* This is a 6-bit pc-relative reloc with
+the right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_9_PCREL,
+
+/* This is a 6-bit pc-relative reloc with
+the right 3 bits assumed to be 0. Same
+as the previous reloc but on the right side
+of the container. */
+ BFD_RELOC_D30V_9_PCREL_R,
+
+/* This is a 12-bit absolute reloc with the
+right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_15,
+
+/* This is a 12-bit pc-relative reloc with
+the right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_15_PCREL,
+
+/* This is a 12-bit pc-relative reloc with
+the right 3 bits assumed to be 0. Same
+as the previous reloc but on the right side
+of the container. */
+ BFD_RELOC_D30V_15_PCREL_R,
+
+/* This is an 18-bit absolute reloc with
+the right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_21,
+
+/* This is an 18-bit pc-relative reloc with
+the right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_21_PCREL,
+
+/* This is an 18-bit pc-relative reloc with
+the right 3 bits assumed to be 0. Same
+as the previous reloc but on the right side
+of the container. */
+ BFD_RELOC_D30V_21_PCREL_R,
+
+/* This is a 32-bit absolute reloc. */
+ BFD_RELOC_D30V_32,
+
+/* This is a 32-bit pc-relative reloc. */
+ BFD_RELOC_D30V_32_PCREL,
+
+/* DLX relocs */
+ BFD_RELOC_DLX_HI16_S,
+
+/* DLX relocs */
+ BFD_RELOC_DLX_LO16,
+
+/* DLX relocs */
+ BFD_RELOC_DLX_JMP26,
+
+/* Renesas M16C/M32C Relocations. */
+ BFD_RELOC_M16C_8_PCREL8,
+ BFD_RELOC_M16C_16_PCREL8,
+ BFD_RELOC_M16C_8_PCREL16,
+ BFD_RELOC_M16C_8_ELABEL24,
+ BFD_RELOC_M16C_8_ABS16,
+ BFD_RELOC_M16C_16_ABS16,
+ BFD_RELOC_M16C_16_ABS24,
+ BFD_RELOC_M16C_16_ABS32,
+ BFD_RELOC_M16C_24_ABS16,
+ BFD_RELOC_M16C_24_ABS24,
+ BFD_RELOC_M16C_24_ABS32,
+ BFD_RELOC_M16C_32_ABS16,
+ BFD_RELOC_M16C_32_ABS24,
+ BFD_RELOC_M16C_32_ABS32,
+ BFD_RELOC_M16C_40_ABS16,
+ BFD_RELOC_M16C_40_ABS24,
+ BFD_RELOC_M16C_40_ABS32,
+
+/* Renesas M32R (formerly Mitsubishi M32R) relocs.
+This is a 24 bit absolute address. */
+ BFD_RELOC_M32R_24,
+
+/* This is a 10-bit pc-relative reloc with the right 2 bits assumed to be 0. */
+ BFD_RELOC_M32R_10_PCREL,
+
+/* This is an 18-bit reloc with the right 2 bits assumed to be 0. */
+ BFD_RELOC_M32R_18_PCREL,
+
+/* This is a 26-bit reloc with the right 2 bits assumed to be 0. */
+ BFD_RELOC_M32R_26_PCREL,
+
+/* This is a 16-bit reloc containing the high 16 bits of an address
+used when the lower 16 bits are treated as unsigned. */
+ BFD_RELOC_M32R_HI16_ULO,
+
+/* This is a 16-bit reloc containing the high 16 bits of an address
+used when the lower 16 bits are treated as signed. */
+ BFD_RELOC_M32R_HI16_SLO,
+
+/* This is a 16-bit reloc containing the lower 16 bits of an address. */
+ BFD_RELOC_M32R_LO16,
+
+/* This is a 16-bit reloc containing the small data area offset for use in
+add3, load, and store instructions. */
+ BFD_RELOC_M32R_SDA16,
+
+/* For PIC. */
+ BFD_RELOC_M32R_GOT24,
+ BFD_RELOC_M32R_26_PLTREL,
+ BFD_RELOC_M32R_COPY,
+ BFD_RELOC_M32R_GLOB_DAT,
+ BFD_RELOC_M32R_JMP_SLOT,
+ BFD_RELOC_M32R_RELATIVE,
+ BFD_RELOC_M32R_GOTOFF,
+ BFD_RELOC_M32R_GOTOFF_HI_ULO,
+ BFD_RELOC_M32R_GOTOFF_HI_SLO,
+ BFD_RELOC_M32R_GOTOFF_LO,
+ BFD_RELOC_M32R_GOTPC24,
+ BFD_RELOC_M32R_GOT16_HI_ULO,
+ BFD_RELOC_M32R_GOT16_HI_SLO,
+ BFD_RELOC_M32R_GOT16_LO,
+ BFD_RELOC_M32R_GOTPC_HI_ULO,
+ BFD_RELOC_M32R_GOTPC_HI_SLO,
+ BFD_RELOC_M32R_GOTPC_LO,
+
+/* This is a 9-bit reloc */
+ BFD_RELOC_V850_9_PCREL,
+
+/* This is a 22-bit reloc */
+ BFD_RELOC_V850_22_PCREL,
+
+/* This is a 16 bit offset from the short data area pointer. */
+ BFD_RELOC_V850_SDA_16_16_OFFSET,
+
+/* This is a 16 bit offset (of which only 15 bits are used) from the
+short data area pointer. */
+ BFD_RELOC_V850_SDA_15_16_OFFSET,
+
+/* This is a 16 bit offset from the zero data area pointer. */
+ BFD_RELOC_V850_ZDA_16_16_OFFSET,
+
+/* This is a 16 bit offset (of which only 15 bits are used) from the
+zero data area pointer. */
+ BFD_RELOC_V850_ZDA_15_16_OFFSET,
+
+/* This is an 8 bit offset (of which only 6 bits are used) from the
+tiny data area pointer. */
+ BFD_RELOC_V850_TDA_6_8_OFFSET,
+
+/* This is an 8bit offset (of which only 7 bits are used) from the tiny
+data area pointer. */
+ BFD_RELOC_V850_TDA_7_8_OFFSET,
+
+/* This is a 7 bit offset from the tiny data area pointer. */
+ BFD_RELOC_V850_TDA_7_7_OFFSET,
+
+/* This is a 16 bit offset from the tiny data area pointer. */
+ BFD_RELOC_V850_TDA_16_16_OFFSET,
+
+/* This is a 5 bit offset (of which only 4 bits are used) from the tiny
+data area pointer. */
+ BFD_RELOC_V850_TDA_4_5_OFFSET,
+
+/* This is a 4 bit offset from the tiny data area pointer. */
+ BFD_RELOC_V850_TDA_4_4_OFFSET,
+
+/* This is a 16 bit offset from the short data area pointer, with the
+bits placed non-contiguously in the instruction. */
+ BFD_RELOC_V850_SDA_16_16_SPLIT_OFFSET,
+
+/* This is a 16 bit offset from the zero data area pointer, with the
+bits placed non-contiguously in the instruction. */
+ BFD_RELOC_V850_ZDA_16_16_SPLIT_OFFSET,
+
+/* This is a 6 bit offset from the call table base pointer. */
+ BFD_RELOC_V850_CALLT_6_7_OFFSET,
+
+/* This is a 16 bit offset from the call table base pointer. */
+ BFD_RELOC_V850_CALLT_16_16_OFFSET,
+
+/* Used for relaxing indirect function calls. */
+ BFD_RELOC_V850_LONGCALL,
+
+/* Used for relaxing indirect jumps. */
+ BFD_RELOC_V850_LONGJUMP,
+
+/* Used to maintain alignment whilst relaxing. */
+ BFD_RELOC_V850_ALIGN,
+
+/* This is a variation of BFD_RELOC_LO16 that can be used in v850e ld.bu
+instructions. */
+ BFD_RELOC_V850_LO16_SPLIT_OFFSET,
+
+/* This is a 32bit pcrel reloc for the mn10300, offset by two bytes in the
+instruction. */
+ BFD_RELOC_MN10300_32_PCREL,
+
+/* This is a 16bit pcrel reloc for the mn10300, offset by two bytes in the
+instruction. */
+ BFD_RELOC_MN10300_16_PCREL,
+
+/* This is an 8bit DP reloc for the tms320c30, where the most
+significant 8 bits of a 24 bit word are placed into the least
+significant 8 bits of the opcode. */
+ BFD_RELOC_TIC30_LDP,
+
+/* This is a 7bit reloc for the tms320c54x, where the least
+significant 7 bits of a 16 bit word are placed into the least
+significant 7 bits of the opcode. */
+ BFD_RELOC_TIC54X_PARTLS7,
+
+/* This is a 9bit DP reloc for the tms320c54x, where the most
+significant 9 bits of a 16 bit word are placed into the least
+significant 9 bits of the opcode. */
+ BFD_RELOC_TIC54X_PARTMS9,
+
+/* This is an extended address 23-bit reloc for the tms320c54x. */
+ BFD_RELOC_TIC54X_23,
+
+/* This is a 16-bit reloc for the tms320c54x, where the least
+significant 16 bits of a 23-bit extended address are placed into
+the opcode. */
+ BFD_RELOC_TIC54X_16_OF_23,
+
+/* This is a reloc for the tms320c54x, where the most
+significant 7 bits of a 23-bit extended address are placed into
+the opcode. */
+ BFD_RELOC_TIC54X_MS7_OF_23,
+
+/* This is a 48 bit reloc for the FR30 that stores 32 bits. */
+ BFD_RELOC_FR30_48,
+
+/* This is a 32 bit reloc for the FR30 that stores 20 bits split up into
+two sections. */
+ BFD_RELOC_FR30_20,
+
+/* This is a 16 bit reloc for the FR30 that stores a 6 bit word offset in
+4 bits. */
+ BFD_RELOC_FR30_6_IN_4,
+
+/* This is a 16 bit reloc for the FR30 that stores an 8 bit byte offset
+into 8 bits. */
+ BFD_RELOC_FR30_8_IN_8,
+
+/* This is a 16 bit reloc for the FR30 that stores a 9 bit short offset
+into 8 bits. */
+ BFD_RELOC_FR30_9_IN_8,
+
+/* This is a 16 bit reloc for the FR30 that stores a 10 bit word offset
+into 8 bits. */
+ BFD_RELOC_FR30_10_IN_8,
+
+/* This is a 16 bit reloc for the FR30 that stores a 9 bit pc relative
+short offset into 8 bits. */
+ BFD_RELOC_FR30_9_PCREL,
+
+/* This is a 16 bit reloc for the FR30 that stores a 12 bit pc relative
+short offset into 11 bits. */
+ BFD_RELOC_FR30_12_PCREL,
+
+/* Motorola Mcore relocations. */
+ BFD_RELOC_MCORE_PCREL_IMM8BY4,
+ BFD_RELOC_MCORE_PCREL_IMM11BY2,
+ BFD_RELOC_MCORE_PCREL_IMM4BY2,
+ BFD_RELOC_MCORE_PCREL_32,
+ BFD_RELOC_MCORE_PCREL_JSR_IMM11BY2,
+ BFD_RELOC_MCORE_RVA,
+
+/* These are relocations for the GETA instruction. */
+ BFD_RELOC_MMIX_GETA,
+ BFD_RELOC_MMIX_GETA_1,
+ BFD_RELOC_MMIX_GETA_2,
+ BFD_RELOC_MMIX_GETA_3,
+
+/* These are relocations for a conditional branch instruction. */
+ BFD_RELOC_MMIX_CBRANCH,
+ BFD_RELOC_MMIX_CBRANCH_J,
+ BFD_RELOC_MMIX_CBRANCH_1,
+ BFD_RELOC_MMIX_CBRANCH_2,
+ BFD_RELOC_MMIX_CBRANCH_3,
+
+/* These are relocations for the PUSHJ instruction. */
+ BFD_RELOC_MMIX_PUSHJ,
+ BFD_RELOC_MMIX_PUSHJ_1,
+ BFD_RELOC_MMIX_PUSHJ_2,
+ BFD_RELOC_MMIX_PUSHJ_3,
+ BFD_RELOC_MMIX_PUSHJ_STUBBABLE,
+
+/* These are relocations for the JMP instruction. */
+ BFD_RELOC_MMIX_JMP,
+ BFD_RELOC_MMIX_JMP_1,
+ BFD_RELOC_MMIX_JMP_2,
+ BFD_RELOC_MMIX_JMP_3,
+
+/* This is a relocation for a relative address as in a GETA instruction or
+a branch. */
+ BFD_RELOC_MMIX_ADDR19,
+
+/* This is a relocation for a relative address as in a JMP instruction. */
+ BFD_RELOC_MMIX_ADDR27,
+
+/* This is a relocation for an instruction field that may be a general
+register or a value 0..255. */
+ BFD_RELOC_MMIX_REG_OR_BYTE,
+
+/* This is a relocation for an instruction field that may be a general
+register. */
+ BFD_RELOC_MMIX_REG,
+
+/* This is a relocation for two instruction fields holding a register and
+an offset, which together are the equivalent of the relocation. */
+ BFD_RELOC_MMIX_BASE_PLUS_OFFSET,
+
+/* This relocation is an assertion that the expression is not allocated as
+a global register. It does not modify contents. */
+ BFD_RELOC_MMIX_LOCAL,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit pc relative
+short offset into 7 bits. */
+ BFD_RELOC_AVR_7_PCREL,
+
+/* This is a 16 bit reloc for the AVR that stores 13 bit pc relative
+short offset into 12 bits. */
+ BFD_RELOC_AVR_13_PCREL,
+
+/* This is a 16 bit reloc for the AVR that stores 17 bit value (usually
+program memory address) into 16 bits. */
+ BFD_RELOC_AVR_16_PM,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually
+data memory address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_LO8_LDI,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit
+of data memory address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_HI8_LDI,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (highest 8 bits
+of program memory address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_HH8_LDI,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(usually data memory address) into 8 bit immediate value of SUBI insn. */
+ BFD_RELOC_AVR_LO8_LDI_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(high 8 bit of data memory address) into 8 bit immediate value of
+SUBI insn. */
+ BFD_RELOC_AVR_HI8_LDI_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(highest 8 bits of program memory address) into 8 bit immediate value
+of LDI or SUBI insn. */
+ BFD_RELOC_AVR_HH8_LDI_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually
+command address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_LO8_LDI_PM,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit
+of command address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_HI8_LDI_PM,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (highest 8 bits
+of command address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_HH8_LDI_PM,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(usually command address) into 8 bit immediate value of SUBI insn. */
+ BFD_RELOC_AVR_LO8_LDI_PM_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(high 8 bit of 16 bit command address) into 8 bit immediate value
+of SUBI insn. */
+ BFD_RELOC_AVR_HI8_LDI_PM_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(high 6 bit of 22 bit command address) into 8 bit immediate
+value of SUBI insn. */
+ BFD_RELOC_AVR_HH8_LDI_PM_NEG,
+
+/* This is a 32 bit reloc for the AVR that stores 23 bit value
+into 22 bits. */
+ BFD_RELOC_AVR_CALL,
+
+/* This is a 16 bit reloc for the AVR that stores all the bits needed
+for absolute addressing with ldi; overflow is checked at link time. */
+ BFD_RELOC_AVR_LDI,
+
+/* This is a 6 bit reloc for the AVR that stores the offset for ldd/std
+instructions. */
+ BFD_RELOC_AVR_6,
+
+/* This is a 6 bit reloc for the AVR that stores the offset for adiw/sbiw
+instructions. */
+ BFD_RELOC_AVR_6_ADIW,
+
+/* Direct 12 bit. */
+ BFD_RELOC_390_12,
+
+/* 12 bit GOT offset. */
+ BFD_RELOC_390_GOT12,
+
+/* 32 bit PC relative PLT address. */
+ BFD_RELOC_390_PLT32,
+
+/* Copy symbol at runtime. */
+ BFD_RELOC_390_COPY,
+
+/* Create GOT entry. */
+ BFD_RELOC_390_GLOB_DAT,
+
+/* Create PLT entry. */
+ BFD_RELOC_390_JMP_SLOT,
+
+/* Adjust by program base. */
+ BFD_RELOC_390_RELATIVE,
+
+/* 32 bit PC relative offset to GOT. */
+ BFD_RELOC_390_GOTPC,
+
+/* 16 bit GOT offset. */
+ BFD_RELOC_390_GOT16,
+
+/* PC relative 16 bit shifted by 1. */
+ BFD_RELOC_390_PC16DBL,
+
+/* 16 bit PC rel. PLT shifted by 1. */
+ BFD_RELOC_390_PLT16DBL,
+
+/* PC relative 32 bit shifted by 1. */
+ BFD_RELOC_390_PC32DBL,
+
+/* 32 bit PC rel. PLT shifted by 1. */
+ BFD_RELOC_390_PLT32DBL,
+
+/* 32 bit PC rel. GOT shifted by 1. */
+ BFD_RELOC_390_GOTPCDBL,
+
+/* 64 bit GOT offset. */
+ BFD_RELOC_390_GOT64,
+
+/* 64 bit PC relative PLT address. */
+ BFD_RELOC_390_PLT64,
+
+/* 32 bit rel. offset to GOT entry. */
+ BFD_RELOC_390_GOTENT,
+
+/* 64 bit offset to GOT. */
+ BFD_RELOC_390_GOTOFF64,
+
+/* 12-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLT12,
+
+/* 16-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLT16,
+
+/* 32-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLT32,
+
+/* 64-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLT64,
+
+/* 32-bit rel. offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLTENT,
+
+/* 16-bit rel. offset from the GOT to a PLT entry. */
+ BFD_RELOC_390_PLTOFF16,
+
+/* 32-bit rel. offset from the GOT to a PLT entry. */
+ BFD_RELOC_390_PLTOFF32,
+
+/* 64-bit rel. offset from the GOT to a PLT entry. */
+ BFD_RELOC_390_PLTOFF64,
+
+/* s390 tls relocations. */
+ BFD_RELOC_390_TLS_LOAD,
+ BFD_RELOC_390_TLS_GDCALL,
+ BFD_RELOC_390_TLS_LDCALL,
+ BFD_RELOC_390_TLS_GD32,
+ BFD_RELOC_390_TLS_GD64,
+ BFD_RELOC_390_TLS_GOTIE12,
+ BFD_RELOC_390_TLS_GOTIE32,
+ BFD_RELOC_390_TLS_GOTIE64,
+ BFD_RELOC_390_TLS_LDM32,
+ BFD_RELOC_390_TLS_LDM64,
+ BFD_RELOC_390_TLS_IE32,
+ BFD_RELOC_390_TLS_IE64,
+ BFD_RELOC_390_TLS_IEENT,
+ BFD_RELOC_390_TLS_LE32,
+ BFD_RELOC_390_TLS_LE64,
+ BFD_RELOC_390_TLS_LDO32,
+ BFD_RELOC_390_TLS_LDO64,
+ BFD_RELOC_390_TLS_DTPMOD,
+ BFD_RELOC_390_TLS_DTPOFF,
+ BFD_RELOC_390_TLS_TPOFF,
+
+/* Long displacement extension. */
+ BFD_RELOC_390_20,
+ BFD_RELOC_390_GOT20,
+ BFD_RELOC_390_GOTPLT20,
+ BFD_RELOC_390_TLS_GOTIE20,
+
+/* Scenix IP2K - 9-bit register number / data address */
+ BFD_RELOC_IP2K_FR9,
+
+/* Scenix IP2K - 4-bit register/data bank number */
+ BFD_RELOC_IP2K_BANK,
+
+/* Scenix IP2K - low 13 bits of instruction word address */
+ BFD_RELOC_IP2K_ADDR16CJP,
+
+/* Scenix IP2K - high 3 bits of instruction word address */
+ BFD_RELOC_IP2K_PAGE3,
+
+/* Scenix IP2K - ext/low/high 8 bits of data address */
+ BFD_RELOC_IP2K_LO8DATA,
+ BFD_RELOC_IP2K_HI8DATA,
+ BFD_RELOC_IP2K_EX8DATA,
+
+/* Scenix IP2K - low/high 8 bits of instruction word address */
+ BFD_RELOC_IP2K_LO8INSN,
+ BFD_RELOC_IP2K_HI8INSN,
+
+/* Scenix IP2K - even/odd PC modifier to modify snb pcl.0 */
+ BFD_RELOC_IP2K_PC_SKIP,
+
+/* Scenix IP2K - 16 bit word address in text section. */
+ BFD_RELOC_IP2K_TEXT,
+
+/* Scenix IP2K - 7-bit sp or dp offset */
+ BFD_RELOC_IP2K_FR_OFFSET,
+
+/* Scenix VPE4K coprocessor - data/insn-space addressing */
+ BFD_RELOC_VPE4KMATH_DATA,
+ BFD_RELOC_VPE4KMATH_INSN,
+
+/* These two relocations are used by the linker to determine which of
+the entries in a C++ virtual function table are actually used. When
+the --gc-sections option is given, the linker will zero out the entries
+that are not used, so that the code for those functions need not be
+included in the output.
+
+VTABLE_INHERIT is a zero-space relocation used to describe to the
+linker the inheritance tree of a C++ virtual function table. The
+relocation's symbol should be the parent class' vtable, and the
+relocation should be located at the child vtable.
+
+VTABLE_ENTRY is a zero-space relocation that describes the use of a
+virtual function table entry. The reloc's symbol should refer to the
+table of the class mentioned in the code. Off of that base, an offset
+describes the entry that is being used. For Rela hosts, this offset
+is stored in the reloc's addend. For Rel hosts, we are forced to put
+this offset in the reloc's section offset. */
+ BFD_RELOC_VTABLE_INHERIT,
+ BFD_RELOC_VTABLE_ENTRY,
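+
+/* Illustrative only (not part of the original header): in GAS these
+are typically emitted with the .vtable_inherit and .vtable_entry
+directives, e.g. for a hypothetical class Derived : Base
+
+  .vtable_inherit _ZTV7Derived, _ZTV4Base    <- VTABLE_INHERIT
+  .vtable_entry _ZTV7Derived, 16             <- VTABLE_ENTRY for one slot
+
+so that with --gc-sections the linker can zero the slots whose
+functions were discarded. */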
+
+/* Intel IA64 Relocations. */
+ BFD_RELOC_IA64_IMM14,
+ BFD_RELOC_IA64_IMM22,
+ BFD_RELOC_IA64_IMM64,
+ BFD_RELOC_IA64_DIR32MSB,
+ BFD_RELOC_IA64_DIR32LSB,
+ BFD_RELOC_IA64_DIR64MSB,
+ BFD_RELOC_IA64_DIR64LSB,
+ BFD_RELOC_IA64_GPREL22,
+ BFD_RELOC_IA64_GPREL64I,
+ BFD_RELOC_IA64_GPREL32MSB,
+ BFD_RELOC_IA64_GPREL32LSB,
+ BFD_RELOC_IA64_GPREL64MSB,
+ BFD_RELOC_IA64_GPREL64LSB,
+ BFD_RELOC_IA64_LTOFF22,
+ BFD_RELOC_IA64_LTOFF64I,
+ BFD_RELOC_IA64_PLTOFF22,
+ BFD_RELOC_IA64_PLTOFF64I,
+ BFD_RELOC_IA64_PLTOFF64MSB,
+ BFD_RELOC_IA64_PLTOFF64LSB,
+ BFD_RELOC_IA64_FPTR64I,
+ BFD_RELOC_IA64_FPTR32MSB,
+ BFD_RELOC_IA64_FPTR32LSB,
+ BFD_RELOC_IA64_FPTR64MSB,
+ BFD_RELOC_IA64_FPTR64LSB,
+ BFD_RELOC_IA64_PCREL21B,
+ BFD_RELOC_IA64_PCREL21BI,
+ BFD_RELOC_IA64_PCREL21M,
+ BFD_RELOC_IA64_PCREL21F,
+ BFD_RELOC_IA64_PCREL22,
+ BFD_RELOC_IA64_PCREL60B,
+ BFD_RELOC_IA64_PCREL64I,
+ BFD_RELOC_IA64_PCREL32MSB,
+ BFD_RELOC_IA64_PCREL32LSB,
+ BFD_RELOC_IA64_PCREL64MSB,
+ BFD_RELOC_IA64_PCREL64LSB,
+ BFD_RELOC_IA64_LTOFF_FPTR22,
+ BFD_RELOC_IA64_LTOFF_FPTR64I,
+ BFD_RELOC_IA64_LTOFF_FPTR32MSB,
+ BFD_RELOC_IA64_LTOFF_FPTR32LSB,
+ BFD_RELOC_IA64_LTOFF_FPTR64MSB,
+ BFD_RELOC_IA64_LTOFF_FPTR64LSB,
+ BFD_RELOC_IA64_SEGREL32MSB,
+ BFD_RELOC_IA64_SEGREL32LSB,
+ BFD_RELOC_IA64_SEGREL64MSB,
+ BFD_RELOC_IA64_SEGREL64LSB,
+ BFD_RELOC_IA64_SECREL32MSB,
+ BFD_RELOC_IA64_SECREL32LSB,
+ BFD_RELOC_IA64_SECREL64MSB,
+ BFD_RELOC_IA64_SECREL64LSB,
+ BFD_RELOC_IA64_REL32MSB,
+ BFD_RELOC_IA64_REL32LSB,
+ BFD_RELOC_IA64_REL64MSB,
+ BFD_RELOC_IA64_REL64LSB,
+ BFD_RELOC_IA64_LTV32MSB,
+ BFD_RELOC_IA64_LTV32LSB,
+ BFD_RELOC_IA64_LTV64MSB,
+ BFD_RELOC_IA64_LTV64LSB,
+ BFD_RELOC_IA64_IPLTMSB,
+ BFD_RELOC_IA64_IPLTLSB,
+ BFD_RELOC_IA64_COPY,
+ BFD_RELOC_IA64_LTOFF22X,
+ BFD_RELOC_IA64_LDXMOV,
+ BFD_RELOC_IA64_TPREL14,
+ BFD_RELOC_IA64_TPREL22,
+ BFD_RELOC_IA64_TPREL64I,
+ BFD_RELOC_IA64_TPREL64MSB,
+ BFD_RELOC_IA64_TPREL64LSB,
+ BFD_RELOC_IA64_LTOFF_TPREL22,
+ BFD_RELOC_IA64_DTPMOD64MSB,
+ BFD_RELOC_IA64_DTPMOD64LSB,
+ BFD_RELOC_IA64_LTOFF_DTPMOD22,
+ BFD_RELOC_IA64_DTPREL14,
+ BFD_RELOC_IA64_DTPREL22,
+ BFD_RELOC_IA64_DTPREL64I,
+ BFD_RELOC_IA64_DTPREL32MSB,
+ BFD_RELOC_IA64_DTPREL32LSB,
+ BFD_RELOC_IA64_DTPREL64MSB,
+ BFD_RELOC_IA64_DTPREL64LSB,
+ BFD_RELOC_IA64_LTOFF_DTPREL22,
+
+/* Motorola 68HC11 reloc.
+This is the 8 bit high part of an absolute address. */
+ BFD_RELOC_M68HC11_HI8,
+
+/* Motorola 68HC11 reloc.
+This is the 8 bit low part of an absolute address. */
+ BFD_RELOC_M68HC11_LO8,
+
+/* Motorola 68HC11 reloc.
+This is the 3-bit portion of a value. */
+ BFD_RELOC_M68HC11_3B,
+
+/* Motorola 68HC11 reloc.
+This reloc marks the beginning of a jump/call instruction.
+It is used for linker relaxation to correctly identify beginning
+of instruction and change some branches to use PC-relative
+addressing mode. */
+ BFD_RELOC_M68HC11_RL_JUMP,
+
+/* Motorola 68HC11 reloc.
+This reloc marks a group of several instructions that gcc generates
+and for which the linker relaxation pass can modify and/or remove
+some of them. */
+ BFD_RELOC_M68HC11_RL_GROUP,
+
+/* Motorola 68HC11 reloc.
+This is the 16-bit lower part of an address. It is used for 'call'
+instruction to specify the symbol address without any special
+transformation (due to memory bank window). */
+ BFD_RELOC_M68HC11_LO16,
+
+/* Motorola 68HC11 reloc.
+This is a 8-bit reloc that specifies the page number of an address.
+It is used by 'call' instruction to specify the page number of
+the symbol. */
+ BFD_RELOC_M68HC11_PAGE,
+
+/* Motorola 68HC11 reloc.
+This is a 24-bit reloc that represents the address with a 16-bit
+value and a 8-bit page number. The symbol address is transformed
+to follow the 16K memory bank of 68HC12 (seen as mapped in the window). */
+ BFD_RELOC_M68HC11_24,
+
+/* Motorola 68HC12 reloc.
+This is the 5-bit portion of a value. */
+ BFD_RELOC_M68HC12_5B,
+
+/* NS CR16C Relocations. */
+ BFD_RELOC_16C_NUM08,
+ BFD_RELOC_16C_NUM08_C,
+ BFD_RELOC_16C_NUM16,
+ BFD_RELOC_16C_NUM16_C,
+ BFD_RELOC_16C_NUM32,
+ BFD_RELOC_16C_NUM32_C,
+ BFD_RELOC_16C_DISP04,
+ BFD_RELOC_16C_DISP04_C,
+ BFD_RELOC_16C_DISP08,
+ BFD_RELOC_16C_DISP08_C,
+ BFD_RELOC_16C_DISP16,
+ BFD_RELOC_16C_DISP16_C,
+ BFD_RELOC_16C_DISP24,
+ BFD_RELOC_16C_DISP24_C,
+ BFD_RELOC_16C_DISP24a,
+ BFD_RELOC_16C_DISP24a_C,
+ BFD_RELOC_16C_REG04,
+ BFD_RELOC_16C_REG04_C,
+ BFD_RELOC_16C_REG04a,
+ BFD_RELOC_16C_REG04a_C,
+ BFD_RELOC_16C_REG14,
+ BFD_RELOC_16C_REG14_C,
+ BFD_RELOC_16C_REG16,
+ BFD_RELOC_16C_REG16_C,
+ BFD_RELOC_16C_REG20,
+ BFD_RELOC_16C_REG20_C,
+ BFD_RELOC_16C_ABS20,
+ BFD_RELOC_16C_ABS20_C,
+ BFD_RELOC_16C_ABS24,
+ BFD_RELOC_16C_ABS24_C,
+ BFD_RELOC_16C_IMM04,
+ BFD_RELOC_16C_IMM04_C,
+ BFD_RELOC_16C_IMM16,
+ BFD_RELOC_16C_IMM16_C,
+ BFD_RELOC_16C_IMM20,
+ BFD_RELOC_16C_IMM20_C,
+ BFD_RELOC_16C_IMM24,
+ BFD_RELOC_16C_IMM24_C,
+ BFD_RELOC_16C_IMM32,
+ BFD_RELOC_16C_IMM32_C,
+
+/* NS CRX Relocations. */
+ BFD_RELOC_CRX_REL4,
+ BFD_RELOC_CRX_REL8,
+ BFD_RELOC_CRX_REL8_CMP,
+ BFD_RELOC_CRX_REL16,
+ BFD_RELOC_CRX_REL24,
+ BFD_RELOC_CRX_REL32,
+ BFD_RELOC_CRX_REGREL12,
+ BFD_RELOC_CRX_REGREL22,
+ BFD_RELOC_CRX_REGREL28,
+ BFD_RELOC_CRX_REGREL32,
+ BFD_RELOC_CRX_ABS16,
+ BFD_RELOC_CRX_ABS32,
+ BFD_RELOC_CRX_NUM8,
+ BFD_RELOC_CRX_NUM16,
+ BFD_RELOC_CRX_NUM32,
+ BFD_RELOC_CRX_IMM16,
+ BFD_RELOC_CRX_IMM32,
+ BFD_RELOC_CRX_SWITCH8,
+ BFD_RELOC_CRX_SWITCH16,
+ BFD_RELOC_CRX_SWITCH32,
+
+/* These relocs are only used within the CRIS assembler. They are not
+(at present) written to any object files. */
+ BFD_RELOC_CRIS_BDISP8,
+ BFD_RELOC_CRIS_UNSIGNED_5,
+ BFD_RELOC_CRIS_SIGNED_6,
+ BFD_RELOC_CRIS_UNSIGNED_6,
+ BFD_RELOC_CRIS_SIGNED_8,
+ BFD_RELOC_CRIS_UNSIGNED_8,
+ BFD_RELOC_CRIS_SIGNED_16,
+ BFD_RELOC_CRIS_UNSIGNED_16,
+ BFD_RELOC_CRIS_LAPCQ_OFFSET,
+ BFD_RELOC_CRIS_UNSIGNED_4,
+
+/* Relocs used in ELF shared libraries for CRIS. */
+ BFD_RELOC_CRIS_COPY,
+ BFD_RELOC_CRIS_GLOB_DAT,
+ BFD_RELOC_CRIS_JUMP_SLOT,
+ BFD_RELOC_CRIS_RELATIVE,
+
+/* 32-bit offset to symbol-entry within GOT. */
+ BFD_RELOC_CRIS_32_GOT,
+
+/* 16-bit offset to symbol-entry within GOT. */
+ BFD_RELOC_CRIS_16_GOT,
+
+/* 32-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_CRIS_32_GOTPLT,
+
+/* 16-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_CRIS_16_GOTPLT,
+
+/* 32-bit offset to symbol, relative to GOT. */
+ BFD_RELOC_CRIS_32_GOTREL,
+
+/* 32-bit offset to symbol with PLT entry, relative to GOT. */
+ BFD_RELOC_CRIS_32_PLT_GOTREL,
+
+/* 32-bit offset to symbol with PLT entry, relative to this relocation. */
+ BFD_RELOC_CRIS_32_PLT_PCREL,
+
+/* Intel i860 Relocations. */
+ BFD_RELOC_860_COPY,
+ BFD_RELOC_860_GLOB_DAT,
+ BFD_RELOC_860_JUMP_SLOT,
+ BFD_RELOC_860_RELATIVE,
+ BFD_RELOC_860_PC26,
+ BFD_RELOC_860_PLT26,
+ BFD_RELOC_860_PC16,
+ BFD_RELOC_860_LOW0,
+ BFD_RELOC_860_SPLIT0,
+ BFD_RELOC_860_LOW1,
+ BFD_RELOC_860_SPLIT1,
+ BFD_RELOC_860_LOW2,
+ BFD_RELOC_860_SPLIT2,
+ BFD_RELOC_860_LOW3,
+ BFD_RELOC_860_LOGOT0,
+ BFD_RELOC_860_SPGOT0,
+ BFD_RELOC_860_LOGOT1,
+ BFD_RELOC_860_SPGOT1,
+ BFD_RELOC_860_LOGOTOFF0,
+ BFD_RELOC_860_SPGOTOFF0,
+ BFD_RELOC_860_LOGOTOFF1,
+ BFD_RELOC_860_SPGOTOFF1,
+ BFD_RELOC_860_LOGOTOFF2,
+ BFD_RELOC_860_LOGOTOFF3,
+ BFD_RELOC_860_LOPC,
+ BFD_RELOC_860_HIGHADJ,
+ BFD_RELOC_860_HAGOT,
+ BFD_RELOC_860_HAGOTOFF,
+ BFD_RELOC_860_HAPC,
+ BFD_RELOC_860_HIGH,
+ BFD_RELOC_860_HIGOT,
+ BFD_RELOC_860_HIGOTOFF,
+
+/* OpenRISC Relocations. */
+ BFD_RELOC_OPENRISC_ABS_26,
+ BFD_RELOC_OPENRISC_REL_26,
+
+/* H8 elf Relocations. */
+ BFD_RELOC_H8_DIR16A8,
+ BFD_RELOC_H8_DIR16R8,
+ BFD_RELOC_H8_DIR24A8,
+ BFD_RELOC_H8_DIR24R8,
+ BFD_RELOC_H8_DIR32A16,
+
+/* Sony Xstormy16 Relocations. */
+ BFD_RELOC_XSTORMY16_REL_12,
+ BFD_RELOC_XSTORMY16_12,
+ BFD_RELOC_XSTORMY16_24,
+ BFD_RELOC_XSTORMY16_FPTR16,
+
+/* Relocations used by VAX ELF. */
+ BFD_RELOC_VAX_GLOB_DAT,
+ BFD_RELOC_VAX_JMP_SLOT,
+ BFD_RELOC_VAX_RELATIVE,
+
+/* Morpho MS1 - 16 bit immediate relocation. */
+ BFD_RELOC_MS1_PC16,
+
+/* Morpho MS1 - Hi 16 bits of an address. */
+ BFD_RELOC_MS1_HI16,
+
+/* Morpho MS1 - Low 16 bits of an address. */
+ BFD_RELOC_MS1_LO16,
+
+/* Morpho MS1 - Used to tell the linker which vtable entries are used. */
+ BFD_RELOC_MS1_GNU_VTINHERIT,
+
+/* Morpho MS1 - Used to tell the linker which vtable entries are used. */
+ BFD_RELOC_MS1_GNU_VTENTRY,
+
+/* msp430 specific relocation codes */
+ BFD_RELOC_MSP430_10_PCREL,
+ BFD_RELOC_MSP430_16_PCREL,
+ BFD_RELOC_MSP430_16,
+ BFD_RELOC_MSP430_16_PCREL_BYTE,
+ BFD_RELOC_MSP430_16_BYTE,
+ BFD_RELOC_MSP430_2X_PCREL,
+ BFD_RELOC_MSP430_RL_PCREL,
+
+/* IQ2000 Relocations. */
+ BFD_RELOC_IQ2000_OFFSET_16,
+ BFD_RELOC_IQ2000_OFFSET_21,
+ BFD_RELOC_IQ2000_UHI16,
+
+/* Special Xtensa relocation used only by PLT entries in ELF shared
+objects to indicate that the runtime linker should set the value
+to one of its own internal functions or data structures. */
+ BFD_RELOC_XTENSA_RTLD,
+
+/* Xtensa relocations for ELF shared objects. */
+ BFD_RELOC_XTENSA_GLOB_DAT,
+ BFD_RELOC_XTENSA_JMP_SLOT,
+ BFD_RELOC_XTENSA_RELATIVE,
+
+/* Xtensa relocation used in ELF object files for symbols that may require
+PLT entries. Otherwise, this is just a generic 32-bit relocation. */
+ BFD_RELOC_XTENSA_PLT,
+
+/* Xtensa relocations to mark the difference of two local symbols.
+These are only needed to support linker relaxation and can be ignored
+when not relaxing. The field is set to the value of the difference
+assuming no relaxation. The relocation encodes the position of the
+first symbol so the linker can determine whether to adjust the field
+value. */
+ BFD_RELOC_XTENSA_DIFF8,
+ BFD_RELOC_XTENSA_DIFF16,
+ BFD_RELOC_XTENSA_DIFF32,
+
+/* Generic Xtensa relocations for instruction operands. Only the slot
+number is encoded in the relocation. The relocation applies to the
+last PC-relative immediate operand, or if there are no PC-relative
+immediates, to the last immediate operand. */
+ BFD_RELOC_XTENSA_SLOT0_OP,
+ BFD_RELOC_XTENSA_SLOT1_OP,
+ BFD_RELOC_XTENSA_SLOT2_OP,
+ BFD_RELOC_XTENSA_SLOT3_OP,
+ BFD_RELOC_XTENSA_SLOT4_OP,
+ BFD_RELOC_XTENSA_SLOT5_OP,
+ BFD_RELOC_XTENSA_SLOT6_OP,
+ BFD_RELOC_XTENSA_SLOT7_OP,
+ BFD_RELOC_XTENSA_SLOT8_OP,
+ BFD_RELOC_XTENSA_SLOT9_OP,
+ BFD_RELOC_XTENSA_SLOT10_OP,
+ BFD_RELOC_XTENSA_SLOT11_OP,
+ BFD_RELOC_XTENSA_SLOT12_OP,
+ BFD_RELOC_XTENSA_SLOT13_OP,
+ BFD_RELOC_XTENSA_SLOT14_OP,
+
+/* Alternate Xtensa relocations. Only the slot is encoded in the
+relocation. The meaning of these relocations is opcode-specific. */
+ BFD_RELOC_XTENSA_SLOT0_ALT,
+ BFD_RELOC_XTENSA_SLOT1_ALT,
+ BFD_RELOC_XTENSA_SLOT2_ALT,
+ BFD_RELOC_XTENSA_SLOT3_ALT,
+ BFD_RELOC_XTENSA_SLOT4_ALT,
+ BFD_RELOC_XTENSA_SLOT5_ALT,
+ BFD_RELOC_XTENSA_SLOT6_ALT,
+ BFD_RELOC_XTENSA_SLOT7_ALT,
+ BFD_RELOC_XTENSA_SLOT8_ALT,
+ BFD_RELOC_XTENSA_SLOT9_ALT,
+ BFD_RELOC_XTENSA_SLOT10_ALT,
+ BFD_RELOC_XTENSA_SLOT11_ALT,
+ BFD_RELOC_XTENSA_SLOT12_ALT,
+ BFD_RELOC_XTENSA_SLOT13_ALT,
+ BFD_RELOC_XTENSA_SLOT14_ALT,
+
+/* Xtensa relocations for backward compatibility. These have all been
+replaced by BFD_RELOC_XTENSA_SLOT0_OP. */
+ BFD_RELOC_XTENSA_OP0,
+ BFD_RELOC_XTENSA_OP1,
+ BFD_RELOC_XTENSA_OP2,
+
+/* Xtensa relocation to mark that the assembler expanded the
+instructions from an original target. The expansion size is
+encoded in the reloc size. */
+ BFD_RELOC_XTENSA_ASM_EXPAND,
+
+/* Xtensa relocation to mark that the linker should simplify
+assembler-expanded instructions. This is commonly used
+internally by the linker after analysis of a
+BFD_RELOC_XTENSA_ASM_EXPAND. */
+ BFD_RELOC_XTENSA_ASM_SIMPLIFY,
+ BFD_RELOC_UNUSED };
+typedef enum bfd_reloc_code_real bfd_reloc_code_real_type;
+reloc_howto_type *bfd_reloc_type_lookup
+ (bfd *abfd, bfd_reloc_code_real_type code);
+
+const char *bfd_get_reloc_code_name (bfd_reloc_code_real_type code);
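+
+/* A minimal usage sketch (illustrative, not part of the original header):
+
+  reloc_howto_type *howto = bfd_reloc_type_lookup (abfd, BFD_RELOC_32);
+  if (howto == NULL)
+    printf ("target cannot represent %s\n",
+            bfd_get_reloc_code_name (BFD_RELOC_32));
+
+The lookup maps a target-independent BFD_RELOC_* code from the enum
+above to the target-specific howto that applies the relocation. */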
+
+/* Extracted from syms.c. */
+
+typedef struct bfd_symbol
+{
+ /* A pointer to the BFD which owns the symbol. This information
+ is necessary so that a back end can work out what additional
+ information (invisible to the application writer) is carried
+ with the symbol.
+
+ This field is *almost* redundant, since you can use section->owner
+ instead, except that some symbols point to the global sections
+ bfd_{abs,com,und}_section. This could be fixed by making
+ these globals be per-bfd (or per-target-flavor). FIXME. */
+ struct bfd *the_bfd; /* Use bfd_asymbol_bfd(sym) to access this field. */
+
+ /* The text of the symbol. The name is left alone, and not copied; the
+ application may not alter it. */
+ const char *name;
+
+ /* The value of the symbol. This really should be a union of a
+ numeric value with a pointer, since some flags indicate that
+ a pointer to another symbol is stored here. */
+ symvalue value;
+
+ /* Attributes of a symbol. */
+#define BSF_NO_FLAGS 0x00
+
+ /* The symbol has local scope; <<static>> in <<C>>. The value
+ is the offset into the section of the data. */
+#define BSF_LOCAL 0x01
+
+ /* The symbol has global scope; initialized data in <<C>>. The
+ value is the offset into the section of the data. */
+#define BSF_GLOBAL 0x02
+
+ /* The symbol has global scope and is exported. The value is
+ the offset into the section of the data. */
+#define BSF_EXPORT BSF_GLOBAL /* No real difference. */
+
+ /* A normal C symbol would be one of:
+ <<BSF_LOCAL>>, <<BSF_FORT_COMM>>, <<BSF_UNDEFINED>> or
+ <<BSF_GLOBAL>>. */
+
+ /* The symbol is a debugging record. The value has an arbitrary
+ meaning, unless BSF_DEBUGGING_RELOC is also set. */
+#define BSF_DEBUGGING 0x08
+
+ /* The symbol denotes a function entry point. Used in ELF,
+ perhaps others someday. */
+#define BSF_FUNCTION 0x10
+
+ /* Used by the linker. */
+#define BSF_KEEP 0x20
+#define BSF_KEEP_G 0x40
+
+ /* A weak global symbol, overridable without warnings by
+ a regular global symbol of the same name. */
+#define BSF_WEAK 0x80
+
+ /* This symbol was created to point to a section, e.g. ELF's
+ STT_SECTION symbols. */
+#define BSF_SECTION_SYM 0x100
+
+ /* The symbol used to be a common symbol, but now it is
+ allocated. */
+#define BSF_OLD_COMMON 0x200
+
+ /* The default value for common data. */
+#define BFD_FORT_COMM_DEFAULT_VALUE 0
+
+ /* In some files the type of a symbol sometimes alters its
+ location in an output file - i.e., in COFF an <<ISFCN>> symbol
+ which is also a <<C_EXT>> symbol appears where it was
+ declared and not at the end of a section. This bit is set
+ by the target BFD part to convey this information. */
+#define BSF_NOT_AT_END 0x400
+
+ /* Signal that the symbol is the label of a constructor section. */
+#define BSF_CONSTRUCTOR 0x800
+
+ /* Signal that the symbol is a warning symbol. The name is a
+ warning. The name of the next symbol is the one to warn about;
+ if a reference is made to a symbol with the same name as the next
+ symbol, a warning is issued by the linker. */
+#define BSF_WARNING 0x1000
+
+ /* Signal that the symbol is indirect. This symbol is an indirect
+ pointer to the symbol with the same name as the next symbol. */
+#define BSF_INDIRECT 0x2000
+
+ /* BSF_FILE marks symbols that contain a file name. This is used
+ for ELF STT_FILE symbols. */
+#define BSF_FILE 0x4000
+
+ /* Symbol is from dynamic linking information. */
+#define BSF_DYNAMIC 0x8000
+
+ /* The symbol denotes a data object. Used in ELF, and perhaps
+ others someday. */
+#define BSF_OBJECT 0x10000
+
+ /* This symbol is a debugging symbol. The value is the offset
+ into the section of the data. BSF_DEBUGGING should be set
+ as well. */
+#define BSF_DEBUGGING_RELOC 0x20000
+
+ /* This symbol is thread local. Used in ELF. */
+#define BSF_THREAD_LOCAL 0x40000
+
+ flagword flags;
+
+ /* A pointer to the section to which this symbol is
+ relative. This will always be non-NULL; there are special
+ sections for undefined and absolute symbols. */
+ struct bfd_section *section;
+
+ /* Back end special data. */
+ union
+ {
+ void *p;
+ bfd_vma i;
+ }
+ udata;
+}
+asymbol;
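+
+/* A hedged sketch of the usual symbol-table idiom, built on the entry
+points declared just below (illustrative only):
+
+  long storage = bfd_get_symtab_upper_bound (abfd);
+  asymbol **syms = malloc (storage);
+  long count = bfd_canonicalize_symtab (abfd, syms);
+  long i;
+  for (i = 0; i < count; i++)
+    if (syms[i]->flags & BSF_FUNCTION)
+      printf ("%s\n", syms[i]->name);  */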
+
+#define bfd_get_symtab_upper_bound(abfd) \
+ BFD_SEND (abfd, _bfd_get_symtab_upper_bound, (abfd))
+
+bfd_boolean bfd_is_local_label (bfd *abfd, asymbol *sym);
+
+bfd_boolean bfd_is_local_label_name (bfd *abfd, const char *name);
+
+#define bfd_is_local_label_name(abfd, name) \
+ BFD_SEND (abfd, _bfd_is_local_label_name, (abfd, name))
+
+bfd_boolean bfd_is_target_special_symbol (bfd *abfd, asymbol *sym);
+
+#define bfd_is_target_special_symbol(abfd, sym) \
+ BFD_SEND (abfd, _bfd_is_target_special_symbol, (abfd, sym))
+
+#define bfd_canonicalize_symtab(abfd, location) \
+ BFD_SEND (abfd, _bfd_canonicalize_symtab, (abfd, location))
+
+bfd_boolean bfd_set_symtab
+ (bfd *abfd, asymbol **location, unsigned int count);
+
+void bfd_print_symbol_vandf (bfd *abfd, void *file, asymbol *symbol);
+
+#define bfd_make_empty_symbol(abfd) \
+ BFD_SEND (abfd, _bfd_make_empty_symbol, (abfd))
+
+asymbol *_bfd_generic_make_empty_symbol (bfd *);
+
+#define bfd_make_debug_symbol(abfd,ptr,size) \
+ BFD_SEND (abfd, _bfd_make_debug_symbol, (abfd, ptr, size))
+
+int bfd_decode_symclass (asymbol *symbol);
+
+bfd_boolean bfd_is_undefined_symclass (int symclass);
+
+void bfd_symbol_info (asymbol *symbol, symbol_info *ret);
+
+bfd_boolean bfd_copy_private_symbol_data
+ (bfd *ibfd, asymbol *isym, bfd *obfd, asymbol *osym);
+
+#define bfd_copy_private_symbol_data(ibfd, isymbol, obfd, osymbol) \
+ BFD_SEND (obfd, _bfd_copy_private_symbol_data, \
+ (ibfd, isymbol, obfd, osymbol))
+
+/* Extracted from bfd.c. */
+struct bfd
+{
+ /* A unique identifier of the BFD */
+ unsigned int id;
+
+ /* The filename the application opened the BFD with. */
+ const char *filename;
+
+ /* A pointer to the target jump table. */
+ const struct bfd_target *xvec;
+
+ /* The IOSTREAM, and corresponding IO vector that provide access
+ to the file backing the BFD. */
+ void *iostream;
+ const struct bfd_iovec *iovec;
+
+ /* Is the file descriptor being cached? That is, can it be closed as
+ needed, and re-opened when accessed later? */
+ bfd_boolean cacheable;
+
+ /* Marks whether there was a default target specified when the
+ BFD was opened. This is used to select which matching algorithm
+ to use to choose the back end. */
+ bfd_boolean target_defaulted;
+
+ /* The caching routines use these to maintain a
+ least-recently-used list of BFDs. */
+ struct bfd *lru_prev, *lru_next;
+
+ /* When a file is closed by the caching routines, BFD retains
+ state information on the file here... */
+ ufile_ptr where;
+
+ /* ... and here: (``once'' means at least once). */
+ bfd_boolean opened_once;
+
+ /* Set if we have a locally maintained mtime value, rather than
+ getting it from the file each time. */
+ bfd_boolean mtime_set;
+
+ /* File modified time, if mtime_set is TRUE. */
+ long mtime;
+
+ /* Reserved for an unimplemented file locking extension. */
+ int ifd;
+
+ /* The format which belongs to the BFD. (object, core, etc.) */
+ bfd_format format;
+
+ /* The direction with which the BFD was opened. */
+ enum bfd_direction
+ {
+ no_direction = 0,
+ read_direction = 1,
+ write_direction = 2,
+ both_direction = 3
+ }
+ direction;
+
+ /* Format_specific flags. */
+ flagword flags;
+
+ /* Currently my_archive is tested before adding origin to
+ anything. I believe that this can always become an add of
+ origin, with origin set to 0 for non-archive files. */
+ ufile_ptr origin;
+
+ /* Remember when output has begun, to stop strange things
+ from happening. */
+ bfd_boolean output_has_begun;
+
+ /* A hash table for section names. */
+ struct bfd_hash_table section_htab;
+
+ /* Pointer to linked list of sections. */
+ struct bfd_section *sections;
+
+ /* The last section on the section list. */
+ struct bfd_section *section_last;
+
+ /* The number of sections. */
+ unsigned int section_count;
+
+ /* Stuff only useful for object files:
+ The start address. */
+ bfd_vma start_address;
+
+ /* Used for input and output. */
+ unsigned int symcount;
+
+ /* Symbol table for output BFD (with symcount entries). */
+ struct bfd_symbol **outsymbols;
+
+ /* Used for slurped dynamic symbol tables. */
+ unsigned int dynsymcount;
+
+ /* Pointer to structure which contains architecture information. */
+ const struct bfd_arch_info *arch_info;
+
+ /* Flag set if symbols from this BFD should not be exported. */
+ bfd_boolean no_export;
+
+ /* Stuff only useful for archives. */
+ void *arelt_data;
+ struct bfd *my_archive; /* The containing archive BFD. */
+ struct bfd *next; /* The next BFD in the archive. */
+ struct bfd *archive_head; /* The first BFD in the archive. */
+ bfd_boolean has_armap;
+
+ /* A chain of BFD structures involved in a link. */
+ struct bfd *link_next;
+
+ /* A field used by _bfd_generic_link_add_archive_symbols. This will
+ be used only for archive elements. */
+ int archive_pass;
+
+ /* Used by the back end to hold private data. */
+ union
+ {
+ struct aout_data_struct *aout_data;
+ struct artdata *aout_ar_data;
+ struct _oasys_data *oasys_obj_data;
+ struct _oasys_ar_data *oasys_ar_data;
+ struct coff_tdata *coff_obj_data;
+ struct pe_tdata *pe_obj_data;
+ struct xcoff_tdata *xcoff_obj_data;
+ struct ecoff_tdata *ecoff_obj_data;
+ struct ieee_data_struct *ieee_data;
+ struct ieee_ar_data_struct *ieee_ar_data;
+ struct srec_data_struct *srec_data;
+ struct ihex_data_struct *ihex_data;
+ struct tekhex_data_struct *tekhex_data;
+ struct elf_obj_tdata *elf_obj_data;
+ struct nlm_obj_tdata *nlm_obj_data;
+ struct bout_data_struct *bout_data;
+ struct mmo_data_struct *mmo_data;
+ struct sun_core_struct *sun_core_data;
+ struct sco5_core_struct *sco5_core_data;
+ struct trad_core_struct *trad_core_data;
+ struct som_data_struct *som_data;
+ struct hpux_core_struct *hpux_core_data;
+ struct hppabsd_core_struct *hppabsd_core_data;
+ struct sgi_core_struct *sgi_core_data;
+ struct lynx_core_struct *lynx_core_data;
+ struct osf_core_struct *osf_core_data;
+ struct cisco_core_struct *cisco_core_data;
+ struct versados_data_struct *versados_data;
+ struct netbsd_core_struct *netbsd_core_data;
+ struct mach_o_data_struct *mach_o_data;
+ struct mach_o_fat_data_struct *mach_o_fat_data;
+ struct bfd_pef_data_struct *pef_data;
+ struct bfd_pef_xlib_data_struct *pef_xlib_data;
+ struct bfd_sym_data_struct *sym_data;
+ void *any;
+ }
+ tdata;
+
+ /* Used by the application to hold private data. */
+ void *usrdata;
+
+ /* Where all the allocated stuff under this BFD goes. This is a
+ struct objalloc *, but we use void * to avoid requiring the inclusion
+ of objalloc.h. */
+ void *memory;
+};
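+
+/* Illustrative only: how these fields normally get populated, assuming
+the usual bfd_openr/bfd_check_format entry points of the full BFD
+library (not necessarily all present in this stripped copy):
+
+  bfd *abfd = bfd_openr ("vmlinux", NULL);
+  if (abfd != NULL && bfd_check_format (abfd, bfd_object))
+    printf ("format %d, %u sections\n",
+            (int) abfd->format, abfd->section_count);  */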
+
+typedef enum bfd_error
+{
+ bfd_error_no_error = 0,
+ bfd_error_system_call,
+ bfd_error_invalid_target,
+ bfd_error_wrong_format,
+ bfd_error_wrong_object_format,
+ bfd_error_invalid_operation,
+ bfd_error_no_memory,
+ bfd_error_no_symbols,
+ bfd_error_no_armap,
+ bfd_error_no_more_archived_files,
+ bfd_error_malformed_archive,
+ bfd_error_file_not_recognized,
+ bfd_error_file_ambiguously_recognized,
+ bfd_error_no_contents,
+ bfd_error_nonrepresentable_section,
+ bfd_error_no_debug_section,
+ bfd_error_bad_value,
+ bfd_error_file_truncated,
+ bfd_error_file_too_big,
+ bfd_error_invalid_error_code
+}
+bfd_error_type;
+
+bfd_error_type bfd_get_error (void);
+
+void bfd_set_error (bfd_error_type error_tag);
+
+const char *bfd_errmsg (bfd_error_type error_tag);
+
+void bfd_perror (const char *message);
+
+typedef void (*bfd_error_handler_type) (const char *, ...);
+
+bfd_error_handler_type bfd_set_error_handler (bfd_error_handler_type);
+
+void bfd_set_error_program_name (const char *);
+
+bfd_error_handler_type bfd_get_error_handler (void);
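+
+/* Minimal error-handling sketch (illustrative): most BFD calls return
+FALSE or NULL on failure and record the cause where bfd_get_error()
+can retrieve it, e.g.
+
+  if (!bfd_set_start_address (abfd, vma))
+    fprintf (stderr, "bfd: %s\n", bfd_errmsg (bfd_get_error ()));  */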
+
+long bfd_get_reloc_upper_bound (bfd *abfd, asection *sect);
+
+long bfd_canonicalize_reloc
+ (bfd *abfd, asection *sec, arelent **loc, asymbol **syms);
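+
+/* These follow the same upper-bound/canonicalize idiom as the symbol
+table; a hedged sketch:
+
+  long relsize = bfd_get_reloc_upper_bound (abfd, sec);
+  arelent **relpp = malloc (relsize);
+  long relcount = bfd_canonicalize_reloc (abfd, sec, relpp, syms);  */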
+
+void bfd_set_reloc
+ (bfd *abfd, asection *sec, arelent **rel, unsigned int count);
+
+bfd_boolean bfd_set_file_flags (bfd *abfd, flagword flags);
+
+int bfd_get_arch_size (bfd *abfd);
+
+int bfd_get_sign_extend_vma (bfd *abfd);
+
+bfd_boolean bfd_set_start_address (bfd *abfd, bfd_vma vma);
+
+unsigned int bfd_get_gp_size (bfd *abfd);
+
+void bfd_set_gp_size (bfd *abfd, unsigned int i);
+
+bfd_vma bfd_scan_vma (const char *string, const char **end, int base);
+
+bfd_boolean bfd_copy_private_header_data (bfd *ibfd, bfd *obfd);
+
+#define bfd_copy_private_header_data(ibfd, obfd) \
+ BFD_SEND (obfd, _bfd_copy_private_header_data, \
+ (ibfd, obfd))
+bfd_boolean bfd_copy_private_bfd_data (bfd *ibfd, bfd *obfd);
+
+#define bfd_copy_private_bfd_data(ibfd, obfd) \
+ BFD_SEND (obfd, _bfd_copy_private_bfd_data, \
+ (ibfd, obfd))
+bfd_boolean bfd_merge_private_bfd_data (bfd *ibfd, bfd *obfd);
+
+#define bfd_merge_private_bfd_data(ibfd, obfd) \
+ BFD_SEND (obfd, _bfd_merge_private_bfd_data, \
+ (ibfd, obfd))
+bfd_boolean bfd_set_private_flags (bfd *abfd, flagword flags);
+
+#define bfd_set_private_flags(abfd, flags) \
+ BFD_SEND (abfd, _bfd_set_private_flags, (abfd, flags))
+#define bfd_sizeof_headers(abfd, reloc) \
+ BFD_SEND (abfd, _bfd_sizeof_headers, (abfd, reloc))
+
+#define bfd_find_nearest_line(abfd, sec, syms, off, file, func, line) \
+ BFD_SEND (abfd, _bfd_find_nearest_line, \
+ (abfd, sec, syms, off, file, func, line))
+
+#define bfd_find_line(abfd, syms, sym, file, line) \
+ BFD_SEND (abfd, _bfd_find_line, \
+ (abfd, syms, sym, file, line))
+
+#define bfd_find_inliner_info(abfd, file, func, line) \
+ BFD_SEND (abfd, _bfd_find_inliner_info, \
+ (abfd, file, func, line))
+
+#define bfd_debug_info_start(abfd) \
+ BFD_SEND (abfd, _bfd_debug_info_start, (abfd))
+
+#define bfd_debug_info_end(abfd) \
+ BFD_SEND (abfd, _bfd_debug_info_end, (abfd))
+
+#define bfd_debug_info_accumulate(abfd, section) \
+ BFD_SEND (abfd, _bfd_debug_info_accumulate, (abfd, section))
+
+#define bfd_stat_arch_elt(abfd, stat) \
+ BFD_SEND (abfd, _bfd_stat_arch_elt,(abfd, stat))
+
+#define bfd_update_armap_timestamp(abfd) \
+ BFD_SEND (abfd, _bfd_update_armap_timestamp, (abfd))
+
+#define bfd_set_arch_mach(abfd, arch, mach)\
+ BFD_SEND ( abfd, _bfd_set_arch_mach, (abfd, arch, mach))
+
+#define bfd_relax_section(abfd, section, link_info, again) \
+ BFD_SEND (abfd, _bfd_relax_section, (abfd, section, link_info, again))
+
+#define bfd_gc_sections(abfd, link_info) \
+ BFD_SEND (abfd, _bfd_gc_sections, (abfd, link_info))
+
+#define bfd_merge_sections(abfd, link_info) \
+ BFD_SEND (abfd, _bfd_merge_sections, (abfd, link_info))
+
+#define bfd_is_group_section(abfd, sec) \
+ BFD_SEND (abfd, _bfd_is_group_section, (abfd, sec))
+
+#define bfd_discard_group(abfd, sec) \
+ BFD_SEND (abfd, _bfd_discard_group, (abfd, sec))
+
+#define bfd_link_hash_table_create(abfd) \
+ BFD_SEND (abfd, _bfd_link_hash_table_create, (abfd))
+
+#define bfd_link_hash_table_free(abfd, hash) \
+ BFD_SEND (abfd, _bfd_link_hash_table_free, (hash))
+
+#define bfd_link_add_symbols(abfd, info) \
+ BFD_SEND (abfd, _bfd_link_add_symbols, (abfd, info))
+
+#define bfd_link_just_syms(abfd, sec, info) \
+ BFD_SEND (abfd, _bfd_link_just_syms, (sec, info))
+
+#define bfd_final_link(abfd, info) \
+ BFD_SEND (abfd, _bfd_final_link, (abfd, info))
+
+#define bfd_free_cached_info(abfd) \
+ BFD_SEND (abfd, _bfd_free_cached_info, (abfd))
+
+#define bfd_get_dynamic_symtab_upper_bound(abfd) \
+ BFD_SEND (abfd, _bfd_get_dynamic_symtab_upper_bound, (abfd))
+
+#define bfd_print_private_bfd_data(abfd, file)\
+ BFD_SEND (abfd, _bfd_print_private_bfd_data, (abfd, file))
+
+#define bfd_canonicalize_dynamic_symtab(abfd, asymbols) \
+ BFD_SEND (abfd, _bfd_canonicalize_dynamic_symtab, (abfd, asymbols))
+
+#define bfd_get_synthetic_symtab(abfd, count, syms, dyncount, dynsyms, ret) \
+ BFD_SEND (abfd, _bfd_get_synthetic_symtab, (abfd, count, syms, \
+ dyncount, dynsyms, ret))
+
+#define bfd_get_dynamic_reloc_upper_bound(abfd) \
+ BFD_SEND (abfd, _bfd_get_dynamic_reloc_upper_bound, (abfd))
+
+#define bfd_canonicalize_dynamic_reloc(abfd, arels, asyms) \
+ BFD_SEND (abfd, _bfd_canonicalize_dynamic_reloc, (abfd, arels, asyms))
+
+extern bfd_byte *bfd_get_relocated_section_contents
+ (bfd *, struct bfd_link_info *, struct bfd_link_order *, bfd_byte *,
+ bfd_boolean, asymbol **);
+
+bfd_boolean bfd_alt_mach_code (bfd *abfd, int alternative);
+
+struct bfd_preserve
+{
+ void *marker;
+ void *tdata;
+ flagword flags;
+ const struct bfd_arch_info *arch_info;
+ struct bfd_section *sections;
+ struct bfd_section *section_last;
+ unsigned int section_count;
+ struct bfd_hash_table section_htab;
+};
+
+bfd_boolean bfd_preserve_save (bfd *, struct bfd_preserve *);
+
+void bfd_preserve_restore (bfd *, struct bfd_preserve *);
+
+void bfd_preserve_finish (bfd *, struct bfd_preserve *);
+
+/* Extracted from archive.c. */
+symindex bfd_get_next_mapent
+ (bfd *abfd, symindex previous, carsym **sym);
+
+bfd_boolean bfd_set_archive_head (bfd *output, bfd *new_head);
+
+bfd *bfd_openr_next_archived_file (bfd *archive, bfd *previous);
+
+/* Extracted from corefile.c. */
+const char *bfd_core_file_failing_command (bfd *abfd);
+
+int bfd_core_file_failing_signal (bfd *abfd);
+
+bfd_boolean core_file_matches_executable_p
+ (bfd *core_bfd, bfd *exec_bfd);
+
+/* Extracted from targets.c. */
+#define BFD_SEND(bfd, message, arglist) \
+ ((*((bfd)->xvec->message)) arglist)
+
+#ifdef DEBUG_BFD_SEND
+#undef BFD_SEND
+#define BFD_SEND(bfd, message, arglist) \
+ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? \
+ ((*((bfd)->xvec->message)) arglist) : \
+ (bfd_assert (__FILE__,__LINE__), NULL))
+#endif
+#define BFD_SEND_FMT(bfd, message, arglist) \
+ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist)
+
+#ifdef DEBUG_BFD_SEND
+#undef BFD_SEND_FMT
+#define BFD_SEND_FMT(bfd, message, arglist) \
+ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? \
+ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist) : \
+ (bfd_assert (__FILE__,__LINE__), NULL))
+#endif
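+
+/* For example, bfd_canonicalize_symtab (abfd, location) above expands
+via BFD_SEND to
+
+  (*((abfd)->xvec->_bfd_canonicalize_symtab)) (abfd, location)
+
+i.e. each such "message" is a virtual call through the target vector. */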
+
+enum bfd_flavour
+{
+ bfd_target_unknown_flavour,
+ bfd_target_aout_flavour,
+ bfd_target_coff_flavour,
+ bfd_target_ecoff_flavour,
+ bfd_target_xcoff_flavour,
+ bfd_target_elf_flavour,
+ bfd_target_ieee_flavour,
+ bfd_target_nlm_flavour,
+ bfd_target_oasys_flavour,
+ bfd_target_tekhex_flavour,
+ bfd_target_srec_flavour,
+ bfd_target_ihex_flavour,
+ bfd_target_som_flavour,
+ bfd_target_os9k_flavour,
+ bfd_target_versados_flavour,
+ bfd_target_msdos_flavour,
+ bfd_target_ovax_flavour,
+ bfd_target_evax_flavour,
+ bfd_target_mmo_flavour,
+ bfd_target_mach_o_flavour,
+ bfd_target_pef_flavour,
+ bfd_target_pef_xlib_flavour,
+ bfd_target_sym_flavour
+};
+
+enum bfd_endian { BFD_ENDIAN_BIG, BFD_ENDIAN_LITTLE, BFD_ENDIAN_UNKNOWN };
+
+/* Forward declaration. */
+typedef struct bfd_link_info _bfd_link_info;
+
+typedef struct bfd_target
+{
+ /* Identifies the kind of target, e.g., SunOS4, Ultrix, etc. */
+ char *name;
+
+ /* The "flavour" of a back end is a general indication about
+ the contents of a file. */
+ enum bfd_flavour flavour;
+
+ /* The order of bytes within the data area of a file. */
+ enum bfd_endian byteorder;
+
+ /* The order of bytes within the header parts of a file. */
+ enum bfd_endian header_byteorder;
+
+ /* A mask of all the flags which an executable may have set -
+ from the set <<BFD_NO_FLAGS>>, <<HAS_RELOC>>, ...<<D_PAGED>>. */
+ flagword object_flags;
+
+ /* A mask of all the flags which a section may have set - from
+ the set <<SEC_NO_FLAGS>>, <<SEC_ALLOC>>, ...<<SET_NEVER_LOAD>>. */
+ flagword section_flags;
+
+ /* The character normally found at the front of a symbol
+ (if any), perhaps `_'. */
+ char symbol_leading_char;
+
+ /* The pad character for file names within an archive header. */
+ char ar_pad_char;
+
+ /* The maximum number of characters in an archive header. */
+ unsigned short ar_max_namelen;
+
+ /* Entries for byte swapping for data. These are different from the
+ other entry points, since they don't take a BFD as the first argument.
+ Certain other handlers could do the same. */
+ bfd_uint64_t (*bfd_getx64) (const void *);
+ bfd_int64_t (*bfd_getx_signed_64) (const void *);
+ void (*bfd_putx64) (bfd_uint64_t, void *);
+ bfd_vma (*bfd_getx32) (const void *);
+ bfd_signed_vma (*bfd_getx_signed_32) (const void *);
+ void (*bfd_putx32) (bfd_vma, void *);
+ bfd_vma (*bfd_getx16) (const void *);
+ bfd_signed_vma (*bfd_getx_signed_16) (const void *);
+ void (*bfd_putx16) (bfd_vma, void *);
+
+ /* Byte swapping for the headers. */
+ bfd_uint64_t (*bfd_h_getx64) (const void *);
+ bfd_int64_t (*bfd_h_getx_signed_64) (const void *);
+ void (*bfd_h_putx64) (bfd_uint64_t, void *);
+ bfd_vma (*bfd_h_getx32) (const void *);
+ bfd_signed_vma (*bfd_h_getx_signed_32) (const void *);
+ void (*bfd_h_putx32) (bfd_vma, void *);
+ bfd_vma (*bfd_h_getx16) (const void *);
+ bfd_signed_vma (*bfd_h_getx_signed_16) (const void *);
+ void (*bfd_h_putx16) (bfd_vma, void *);
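+
+ /* Illustrative only: with the target vector in hand these can be
+ called directly, e.g.
+
+   bfd_vma v = abfd->xvec->bfd_getx32 (buf);
+
+ which is how BFD's endian-neutral bfd_get_32()-style accessors
+ are implemented elsewhere in BFD. */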
+
+ /* Format dependent routines: these are vectors of entry points
+ within the target vector structure, one for each format to check. */
+
+ /* Check the format of a file being read. Return a <<bfd_target *>> or zero. */
+ const struct bfd_target *(*_bfd_check_format[bfd_type_end]) (bfd *);
+
+ /* Set the format of a file being written. */
+ bfd_boolean (*_bfd_set_format[bfd_type_end]) (bfd *);
+
+ /* Write cached information into a file being written, at <<bfd_close>>. */
+ bfd_boolean (*_bfd_write_contents[bfd_type_end]) (bfd *);
+
+
+ /* Generic entry points. */
+#define BFD_JUMP_TABLE_GENERIC(NAME) \
+ NAME##_close_and_cleanup, \
+ NAME##_bfd_free_cached_info, \
+ NAME##_new_section_hook, \
+ NAME##_get_section_contents, \
+ NAME##_get_section_contents_in_window
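+
+ /* A backend fills the five generic slots by token-pasting: e.g.
+ BFD_JUMP_TABLE_GENERIC (foo) expands to foo_close_and_cleanup,
+ foo_bfd_free_cached_info, ... where "foo" stands for a
+ hypothetical backend prefix. */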
+
+ /* Called when the BFD is being closed to do any necessary cleanup. */
+ bfd_boolean (*_close_and_cleanup) (bfd *);
+ /* Ask the BFD to free all cached information. */
+ bfd_boolean (*_bfd_free_cached_info) (bfd *);
+ /* Called when a new section is created. */
+ bfd_boolean (*_new_section_hook) (bfd *, sec_ptr);
+ /* Read the contents of a section. */
+ bfd_boolean (*_bfd_get_section_contents)
+ (bfd *, sec_ptr, void *, file_ptr, bfd_size_type);
+ bfd_boolean (*_bfd_get_section_contents_in_window)
+ (bfd *, sec_ptr, bfd_window *, file_ptr, bfd_size_type);
+
+ /* Entry points to copy private data. */
+#define BFD_JUMP_TABLE_COPY(NAME) \
+ NAME##_bfd_copy_private_bfd_data, \
+ NAME##_bfd_merge_private_bfd_data, \
+ NAME##_bfd_copy_private_section_data, \
+ NAME##_bfd_copy_private_symbol_data, \
+ NAME##_bfd_copy_private_header_data, \
+ NAME##_bfd_set_private_flags, \
+ NAME##_bfd_print_private_bfd_data
+
+ /* Called to copy BFD general private data from one object file
+ to another. */
+ bfd_boolean (*_bfd_copy_private_bfd_data) (bfd *, bfd *);
+ /* Called to merge BFD general private data from one object file
+ to a common output file when linking. */
+ bfd_boolean (*_bfd_merge_private_bfd_data) (bfd *, bfd *);
+ /* Called to copy BFD private section data from one object file
+ to another. */
+ bfd_boolean (*_bfd_copy_private_section_data)
+ (bfd *, sec_ptr, bfd *, sec_ptr);
+ /* Called to copy BFD private symbol data from one symbol
+ to another. */
+ bfd_boolean (*_bfd_copy_private_symbol_data)
+ (bfd *, asymbol *, bfd *, asymbol *);
+ /* Called to copy BFD private header data from one object file
+ to another. */
+ bfd_boolean (*_bfd_copy_private_header_data)
+ (bfd *, bfd *);
+ /* Called to set private backend flags. */
+ bfd_boolean (*_bfd_set_private_flags) (bfd *, flagword);
+
+ /* Called to print private BFD data. */
+ bfd_boolean (*_bfd_print_private_bfd_data) (bfd *, void *);
+
+ /* Core file entry points. */
+#define BFD_JUMP_TABLE_CORE(NAME) \
+ NAME##_core_file_failing_command, \
+ NAME##_core_file_failing_signal, \
+ NAME##_core_file_matches_executable_p
+
+ char * (*_core_file_failing_command) (bfd *);
+ int (*_core_file_failing_signal) (bfd *);
+ bfd_boolean (*_core_file_matches_executable_p) (bfd *, bfd *);
+
+ /* Archive entry points. */
+#define BFD_JUMP_TABLE_ARCHIVE(NAME) \
+ NAME##_slurp_armap, \
+ NAME##_slurp_extended_name_table, \
+ NAME##_construct_extended_name_table, \
+ NAME##_truncate_arname, \
+ NAME##_write_armap, \
+ NAME##_read_ar_hdr, \
+ NAME##_openr_next_archived_file, \
+ NAME##_get_elt_at_index, \
+ NAME##_generic_stat_arch_elt, \
+ NAME##_update_armap_timestamp
+
+ bfd_boolean (*_bfd_slurp_armap) (bfd *);
+ bfd_boolean (*_bfd_slurp_extended_name_table) (bfd *);
+ bfd_boolean (*_bfd_construct_extended_name_table)
+ (bfd *, char **, bfd_size_type *, const char **);
+ void (*_bfd_truncate_arname) (bfd *, const char *, char *);
+ bfd_boolean (*write_armap)
+ (bfd *, unsigned int, struct orl *, unsigned int, int);
+ void * (*_bfd_read_ar_hdr_fn) (bfd *);
+ bfd * (*openr_next_archived_file) (bfd *, bfd *);
+#define bfd_get_elt_at_index(b,i) BFD_SEND (b, _bfd_get_elt_at_index, (b,i))
+ bfd * (*_bfd_get_elt_at_index) (bfd *, symindex);
+ int (*_bfd_stat_arch_elt) (bfd *, struct stat *);
+ bfd_boolean (*_bfd_update_armap_timestamp) (bfd *);
+
+ /* Entry points used for symbols. */
+#define BFD_JUMP_TABLE_SYMBOLS(NAME) \
+ NAME##_get_symtab_upper_bound, \
+ NAME##_canonicalize_symtab, \
+ NAME##_make_empty_symbol, \
+ NAME##_print_symbol, \
+ NAME##_get_symbol_info, \
+ NAME##_bfd_is_local_label_name, \
+ NAME##_bfd_is_target_special_symbol, \
+ NAME##_get_lineno, \
+ NAME##_find_nearest_line, \
+ _bfd_generic_find_line, \
+ NAME##_find_inliner_info, \
+ NAME##_bfd_make_debug_symbol, \
+ NAME##_read_minisymbols, \
+ NAME##_minisymbol_to_symbol
+
+ long (*_bfd_get_symtab_upper_bound) (bfd *);
+ long (*_bfd_canonicalize_symtab)
+ (bfd *, struct bfd_symbol **);
+ struct bfd_symbol *
+ (*_bfd_make_empty_symbol) (bfd *);
+ void (*_bfd_print_symbol)
+ (bfd *, void *, struct bfd_symbol *, bfd_print_symbol_type);
+#define bfd_print_symbol(b,p,s,e) BFD_SEND (b, _bfd_print_symbol, (b,p,s,e))
+ void (*_bfd_get_symbol_info)
+ (bfd *, struct bfd_symbol *, symbol_info *);
+#define bfd_get_symbol_info(b,p,e) BFD_SEND (b, _bfd_get_symbol_info, (b,p,e))
+ bfd_boolean (*_bfd_is_local_label_name) (bfd *, const char *);
+ bfd_boolean (*_bfd_is_target_special_symbol) (bfd *, asymbol *);
+ alent * (*_get_lineno) (bfd *, struct bfd_symbol *);
+ bfd_boolean (*_bfd_find_nearest_line)
+ (bfd *, struct bfd_section *, struct bfd_symbol **, bfd_vma,
+ const char **, const char **, unsigned int *);
+ bfd_boolean (*_bfd_find_line)
+ (bfd *, struct bfd_symbol **, struct bfd_symbol *,
+ const char **, unsigned int *);
+ bfd_boolean (*_bfd_find_inliner_info)
+ (bfd *, const char **, const char **, unsigned int *);
+ /* Back-door to allow format-aware applications to create debug symbols
+ while using BFD for everything else. Currently used by the assembler
+ when creating COFF files. */
+ asymbol * (*_bfd_make_debug_symbol)
+ (bfd *, void *, unsigned long size);
+#define bfd_read_minisymbols(b, d, m, s) \
+ BFD_SEND (b, _read_minisymbols, (b, d, m, s))
+ long (*_read_minisymbols)
+ (bfd *, bfd_boolean, void **, unsigned int *);
+#define bfd_minisymbol_to_symbol(b, d, m, f) \
+ BFD_SEND (b, _minisymbol_to_symbol, (b, d, m, f))
+ asymbol * (*_minisymbol_to_symbol)
+ (bfd *, bfd_boolean, const void *, asymbol *);
+
+ /* Routines for relocs. */
+#define BFD_JUMP_TABLE_RELOCS(NAME) \
+ NAME##_get_reloc_upper_bound, \
+ NAME##_canonicalize_reloc, \
+ NAME##_bfd_reloc_type_lookup
+
+ long (*_get_reloc_upper_bound) (bfd *, sec_ptr);
+ long (*_bfd_canonicalize_reloc)
+ (bfd *, sec_ptr, arelent **, struct bfd_symbol **);
+ /* See documentation on reloc types. */
+ reloc_howto_type *
+ (*reloc_type_lookup) (bfd *, bfd_reloc_code_real_type);
+
+ /* Routines used when writing an object file. */
+#define BFD_JUMP_TABLE_WRITE(NAME) \
+ NAME##_set_arch_mach, \
+ NAME##_set_section_contents
+
+ bfd_boolean (*_bfd_set_arch_mach)
+ (bfd *, enum bfd_architecture, unsigned long);
+ bfd_boolean (*_bfd_set_section_contents)
+ (bfd *, sec_ptr, const void *, file_ptr, bfd_size_type);
+
+ /* Routines used by the linker. */
+#define BFD_JUMP_TABLE_LINK(NAME) \
+ NAME##_sizeof_headers, \
+ NAME##_bfd_get_relocated_section_contents, \
+ NAME##_bfd_relax_section, \
+ NAME##_bfd_link_hash_table_create, \
+ NAME##_bfd_link_hash_table_free, \
+ NAME##_bfd_link_add_symbols, \
+ NAME##_bfd_link_just_syms, \
+ NAME##_bfd_final_link, \
+ NAME##_bfd_link_split_section, \
+ NAME##_bfd_gc_sections, \
+ NAME##_bfd_merge_sections, \
+ NAME##_bfd_is_group_section, \
+ NAME##_bfd_discard_group, \
+ NAME##_section_already_linked \
+
+ int (*_bfd_sizeof_headers) (bfd *, bfd_boolean);
+ bfd_byte * (*_bfd_get_relocated_section_contents)
+ (bfd *, struct bfd_link_info *, struct bfd_link_order *,
+ bfd_byte *, bfd_boolean, struct bfd_symbol **);
+
+ bfd_boolean (*_bfd_relax_section)
+ (bfd *, struct bfd_section *, struct bfd_link_info *, bfd_boolean *);
+
+ /* Create a hash table for the linker. Different backends store
+ different information in this table. */
+ struct bfd_link_hash_table *
+ (*_bfd_link_hash_table_create) (bfd *);
+
+ /* Release the memory associated with the linker hash table. */
+ void (*_bfd_link_hash_table_free) (struct bfd_link_hash_table *);
+
+ /* Add symbols from this object file into the hash table. */
+ bfd_boolean (*_bfd_link_add_symbols) (bfd *, struct bfd_link_info *);
+
+ /* Indicate that we are only retrieving symbol values from this section. */
+ void (*_bfd_link_just_syms) (asection *, struct bfd_link_info *);
+
+ /* Do a link based on the link_order structures attached to each
+ section of the BFD. */
+ bfd_boolean (*_bfd_final_link) (bfd *, struct bfd_link_info *);
+
+ /* Should this section be split up into smaller pieces during linking. */
+ bfd_boolean (*_bfd_link_split_section) (bfd *, struct bfd_section *);
+
+ /* Remove sections that are not referenced from the output. */
+ bfd_boolean (*_bfd_gc_sections) (bfd *, struct bfd_link_info *);
+
+ /* Attempt to merge SEC_MERGE sections. */
+ bfd_boolean (*_bfd_merge_sections) (bfd *, struct bfd_link_info *);
+
+ /* Is this section a member of a group? */
+ bfd_boolean (*_bfd_is_group_section) (bfd *, const struct bfd_section *);
+
+ /* Discard members of a group. */
+ bfd_boolean (*_bfd_discard_group) (bfd *, struct bfd_section *);
+
+  /* Check if SEC has already been linked during a relocatable or
+ final link. */
+ void (*_section_already_linked) (bfd *, struct bfd_section *);
+
+ /* Routines to handle dynamic symbols and relocs. */
+#define BFD_JUMP_TABLE_DYNAMIC(NAME) \
+ NAME##_get_dynamic_symtab_upper_bound, \
+ NAME##_canonicalize_dynamic_symtab, \
+ NAME##_get_synthetic_symtab, \
+ NAME##_get_dynamic_reloc_upper_bound, \
+ NAME##_canonicalize_dynamic_reloc
+
+ /* Get the amount of memory required to hold the dynamic symbols. */
+ long (*_bfd_get_dynamic_symtab_upper_bound) (bfd *);
+ /* Read in the dynamic symbols. */
+ long (*_bfd_canonicalize_dynamic_symtab)
+ (bfd *, struct bfd_symbol **);
+  /* Create synthesized symbols.  */
+ long (*_bfd_get_synthetic_symtab)
+ (bfd *, long, struct bfd_symbol **, long, struct bfd_symbol **,
+ struct bfd_symbol **);
+ /* Get the amount of memory required to hold the dynamic relocs. */
+ long (*_bfd_get_dynamic_reloc_upper_bound) (bfd *);
+ /* Read in the dynamic relocs. */
+ long (*_bfd_canonicalize_dynamic_reloc)
+ (bfd *, arelent **, struct bfd_symbol **);
+
+ /* Opposite endian version of this target. */
+ const struct bfd_target * alternative_target;
+
+ /* Data for use by back-end routines, which isn't
+ generic enough to belong in this structure. */
+ const void *backend_data;
+
+} bfd_target;
+
+bfd_boolean bfd_set_default_target (const char *name);
+
+const bfd_target *bfd_find_target (const char *target_name, bfd *abfd);
+
+const char ** bfd_target_list (void);
+
+const bfd_target *bfd_search_for_target
+ (int (*search_func) (const bfd_target *, void *),
+ void *);
+
+/* Extracted from format.c. */
+bfd_boolean bfd_check_format (bfd *abfd, bfd_format format);
+
+bfd_boolean bfd_check_format_matches
+ (bfd *abfd, bfd_format format, char ***matching);
+
+bfd_boolean bfd_set_format (bfd *abfd, bfd_format format);
+
+const char *bfd_format_string (bfd_format format);
+
+/* Extracted from linker.c. */
+bfd_boolean bfd_link_split_section (bfd *abfd, asection *sec);
+
+#define bfd_link_split_section(abfd, sec) \
+ BFD_SEND (abfd, _bfd_link_split_section, (abfd, sec))
+
+void bfd_section_already_linked (bfd *abfd, asection *sec);
+
+#define bfd_section_already_linked(abfd, sec) \
+ BFD_SEND (abfd, _section_already_linked, (abfd, sec))
+
+/* Extracted from simple.c. */
+bfd_byte *bfd_simple_get_relocated_section_contents
+ (bfd *abfd, asection *sec, bfd_byte *outbuf, asymbol **symbol_table);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -49,6 +49,7 @@
#ifdef CONFIG_X86_32
# define SYSCALL_VECTOR 0x80
#endif
+#define KDBENTER_VECTOR 0x81
/*
* Vectors 0x30-0x3f are used for ISA interrupts.
@@ -102,6 +103,12 @@
#define NUM_INVALIDATE_TLB_VECTORS 8
/*
+ * KDB_VECTOR takes over vector 0xfe when it is needed; in theory that
+ * vector should not be in use anyway.
+ */
+#define KDB_VECTOR 0xfe
+
+/*
* Local APIC timer IRQ vector is on a different priority level,
* to work around the 'lost local interrupt if more than 2 IRQ
* sources per level' errata.
--- /dev/null
+++ b/arch/x86/include/asm/kdb.h
@@ -0,0 +1,140 @@
+#ifndef _ASM_KDB_H
+#define _ASM_KDB_H
+
+/*
+ * Kernel Debugger Architecture Dependent (x86) Global Headers
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * KDB_ENTER() is a macro which causes entry into the kernel
+ * debugger from any point in the kernel code stream.  If it
+ * is intended to be used from interrupt level, it must use
+ * a non-maskable entry method.  The "int $129" below is
+ * KDBENTER_VECTOR (0x81), defined in irq_vectors.h.
+ */
+#define KDB_ENTER() do {if (kdb_on && !KDB_IS_RUNNING()) { asm("\tint $129\n"); }} while(0)
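+
+/*
+ * Usage sketch (hypothetical call site): drop into the debugger when a
+ * driver detects corruption, assuming kdb is enabled:
+ *
+ *	if (corruption_detected)
+ *		KDB_ENTER();
+ */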
+
+/*
+ * Needed for exported symbols.
+ */
+typedef unsigned long kdb_machreg_t;
+
+/*
+ * Per-cpu arch-specific kdb state.  Must lie within the mask 0xff000000.
+ */
+#define KDB_STATE_A_IF 0x01000000 /* Saved IF flag */
+
+
+#ifdef CONFIG_X86_32
+
+#define kdb_machreg_fmt "0x%lx"
+#define kdb_machreg_fmt0 "0x%08lx"
+#define kdb_bfd_vma_fmt "0x%lx"
+#define kdb_bfd_vma_fmt0 "0x%08lx"
+#define kdb_elfw_addr_fmt "0x%x"
+#define kdb_elfw_addr_fmt0 "0x%08x"
+#define kdb_f_count_fmt "%ld"
+
+#else /* CONFIG_X86_32 */
+
+#define kdb_machreg_fmt "0x%lx"
+#define kdb_machreg_fmt0 "0x%016lx"
+#define kdb_bfd_vma_fmt "0x%lx"
+#define kdb_bfd_vma_fmt0 "0x%016lx"
+#define kdb_elfw_addr_fmt "0x%x"
+#define kdb_elfw_addr_fmt0 "0x%016x"
+#define kdb_f_count_fmt "%ld"
+
+/*
+ * Functions to safely read and write kernel areas. The {to,from}_xxx
+ * addresses are not necessarily valid; these functions must check for
+ * validity. If the arch already supports get and put routines with
+ * suitable validation and/or recovery on invalid addresses then use
+ * those routines, otherwise check it yourself.
+ */
+
+/*
+ * asm-i386 uaccess.h supplies __copy_to_user, which relies on the MMU
+ * to trap invalid addresses in the _xxx fields.  Verify that the other
+ * address of the pair is valid by accessing its first and last byte
+ * ourselves; any access violation must then have been caused by the
+ * _xxx address.
+ */
+
+#include <asm/uaccess.h>
+
+static inline int
+__kdba_putarea_size(unsigned long to_xxx, void *from, size_t size)
+{
+ mm_segment_t oldfs = get_fs();
+ int r;
+ char c;
+	/* Touch the first and last byte of the source so that any fault
+	 * on the from side happens here, before the copy below. */
+	c = *((volatile char *)from);
+	c = *((volatile char *)from + size - 1);
+
+ if (to_xxx < PAGE_OFFSET) {
+ return kdb_putuserarea_size(to_xxx, from, size);
+ }
+
+ set_fs(KERNEL_DS);
+ r = __copy_to_user_inatomic((void *)to_xxx, from, size);
+ set_fs(oldfs);
+ return r;
+}
+
+static inline int
+__kdba_getarea_size(void *to, unsigned long from_xxx, size_t size)
+{
+ mm_segment_t oldfs = get_fs();
+ int r;
+	/* Touch the first and last byte of the destination so that any
+	 * fault on the to side happens here, before the copy below. */
+	*((volatile char *)to) = '\0';
+	*((volatile char *)to + size - 1) = '\0';
+
+ if (from_xxx < PAGE_OFFSET) {
+ return kdb_getuserarea_size(to, from_xxx, size);
+ }
+
+ set_fs(KERNEL_DS);
+ r = __copy_to_user_inatomic(to, (void *)from_xxx, size);
+ set_fs(oldfs);
+ return r;
+}
+
+/* For NUMA with replicated code/data, the platform must supply its own
+ * kdba_putarea_size and kdba_getarea_size routines. Without replication kdb
+ * uses the standard architecture routines.
+ */
+#ifdef CONFIG_NUMA_REPLICATE
+extern int kdba_putarea_size(unsigned long to_xxx, void *from, size_t size);
+extern int kdba_getarea_size(void *to, unsigned long from_xxx, size_t size);
+#else
+#define kdba_putarea_size __kdba_putarea_size
+#define kdba_getarea_size __kdba_getarea_size
+#endif
+
+static inline int
+kdba_verify_rw(unsigned long addr, size_t size)
+{
+ unsigned char data[size];
+ return(kdba_getarea_size(data, addr, size) || kdba_putarea_size(addr, data, size));
+}
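+
+/*
+ * Usage sketch (hypothetical caller): fetch a word from an address that
+ * may be invalid, reporting failure instead of faulting:
+ *
+ *	unsigned long word;
+ *	if (kdba_getarea_size(&word, addr, sizeof(word)))
+ *		kdb_printf("cannot read " kdb_machreg_fmt "\n", addr);
+ */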
+
+#endif /* !CONFIG_X86_32 */
+
+static inline unsigned long
+kdba_funcptr_value(void *fp)
+{
+ return (unsigned long)fp;
+}
+
+#ifdef CONFIG_SMP
+extern void kdba_giveback_vector(int);
+#endif
+
+#endif /* !_ASM_KDB_H */
--- /dev/null
+++ b/arch/x86/include/asm/kdbprivate.h
@@ -0,0 +1,241 @@
+#ifndef _ASM_KDBPRIVATE_H
+#define _ASM_KDBPRIVATE_H
+
+/*
+ * Kernel Debugger Architecture Dependent (x86) Private Headers
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+typedef unsigned char kdb_machinst_t;
+
+/*
+ * KDB_MAXBPT describes the total number of breakpoints
+ * supported by this architecure.
+ */
+#define KDB_MAXBPT 16
+
+/*
+ * KDB_MAXHARDBPT describes the total number of hardware
+ * breakpoint registers that exist.
+ */
+#define KDB_MAXHARDBPT 4
+
+/* Maximum number of arguments to a function */
+#define KDBA_MAXARGS 16
+
+/*
+ * Support for ia32 debug registers
+ */
+typedef struct _kdbhard_bp {
+ kdb_machreg_t bph_reg; /* Register this breakpoint uses */
+
+ unsigned int bph_free:1; /* Register available for use */
+ unsigned int bph_data:1; /* Data Access breakpoint */
+
+ unsigned int bph_write:1; /* Write Data breakpoint */
+ unsigned int bph_mode:2; /* 0=inst, 1=write, 2=io, 3=read */
+ unsigned int bph_length:2; /* 0=1, 1=2, 2=BAD, 3=4 (bytes) */
+ unsigned int bph_installed; /* flag: hw bp is installed */
+} kdbhard_bp_t;
+
+#define IA32_BREAKPOINT_INSTRUCTION 0xcc
+
+#define DR6_BT 0x00008000
+#define DR6_BS 0x00004000
+#define DR6_BD 0x00002000
+
+#define DR6_B3 0x00000008
+#define DR6_B2 0x00000004
+#define DR6_B1 0x00000002
+#define DR6_B0 0x00000001
+#define DR6_DR_MASK 0x0000000F
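+
+/*
+ * Sketch of decoding DR6 after a debug trap (hypothetical handler;
+ * kdba_getdr6() is declared further down):
+ *
+ *	kdb_machreg_t dr6 = kdba_getdr6();
+ *	if (dr6 & DR6_B0)
+ *		... breakpoint in debug register 0 fired ...
+ */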
+
+#define DR7_RW_VAL(dr, drnum) \
+ (((dr) >> (16 + (4 * (drnum)))) & 0x3)
+
+#define DR7_RW_SET(dr, drnum, rw) \
+ do { \
+ (dr) &= ~(0x3 << (16 + (4 * (drnum)))); \
+ (dr) |= (((rw) & 0x3) << (16 + (4 * (drnum)))); \
+ } while (0)
+
+#define DR7_RW0(dr) DR7_RW_VAL(dr, 0)
+#define DR7_RW0SET(dr,rw) DR7_RW_SET(dr, 0, rw)
+#define DR7_RW1(dr) DR7_RW_VAL(dr, 1)
+#define DR7_RW1SET(dr,rw) DR7_RW_SET(dr, 1, rw)
+#define DR7_RW2(dr) DR7_RW_VAL(dr, 2)
+#define DR7_RW2SET(dr,rw) DR7_RW_SET(dr, 2, rw)
+#define DR7_RW3(dr) DR7_RW_VAL(dr, 3)
+#define DR7_RW3SET(dr,rw) DR7_RW_SET(dr, 3, rw)
+
+
+#define DR7_LEN_VAL(dr, drnum) \
+ (((dr) >> (18 + (4 * (drnum)))) & 0x3)
+
+#define DR7_LEN_SET(dr, drnum, len) \
+	do { \
+		(dr) &= ~(0x3 << (18 + (4 * (drnum)))); \
+		(dr) |= (((len) & 0x3) << (18 + (4 * (drnum)))); \
+	} while (0)
+
+#define DR7_LEN0(dr) DR7_LEN_VAL(dr, 0)
+#define DR7_LEN0SET(dr,len) DR7_LEN_SET(dr, 0, len)
+#define DR7_LEN1(dr) DR7_LEN_VAL(dr, 1)
+#define DR7_LEN1SET(dr,len) DR7_LEN_SET(dr, 1, len)
+#define DR7_LEN2(dr) DR7_LEN_VAL(dr, 2)
+#define DR7_LEN2SET(dr,len) DR7_LEN_SET(dr, 2, len)
+#define DR7_LEN3(dr) DR7_LEN_VAL(dr, 3)
+#define DR7_LEN3SET(dr,len) DR7_LEN_SET(dr, 3, len)
+
+#define DR7_G0(dr) (((dr)>>1)&0x1)
+#define DR7_G0SET(dr) ((dr) |= 0x2)
+#define DR7_G0CLR(dr) ((dr) &= ~0x2)
+#define DR7_G1(dr) (((dr)>>3)&0x1)
+#define DR7_G1SET(dr) ((dr) |= 0x8)
+#define DR7_G1CLR(dr) ((dr) &= ~0x8)
+#define DR7_G2(dr) (((dr)>>5)&0x1)
+#define DR7_G2SET(dr) ((dr) |= 0x20)
+#define DR7_G2CLR(dr) ((dr) &= ~0x20)
+#define DR7_G3(dr) (((dr)>>7)&0x1)
+#define DR7_G3SET(dr) ((dr) |= 0x80)
+#define DR7_G3CLR(dr) ((dr) &= ~0x80)
+
+#define DR7_L0(dr) (((dr))&0x1)
+#define DR7_L0SET(dr) ((dr) |= 0x1)
+#define DR7_L0CLR(dr) ((dr) &= ~0x1)
+#define DR7_L1(dr) (((dr)>>2)&0x1)
+#define DR7_L1SET(dr) ((dr) |= 0x4)
+#define DR7_L1CLR(dr) ((dr) &= ~0x4)
+#define DR7_L2(dr) (((dr)>>4)&0x1)
+#define DR7_L2SET(dr) ((dr) |= 0x10)
+#define DR7_L2CLR(dr) ((dr) &= ~0x10)
+#define DR7_L3(dr) (((dr)>>6)&0x1)
+#define DR7_L3SET(dr) ((dr) |= 0x40)
+#define DR7_L3CLR(dr) ((dr) &= ~0x40)
+
+#define DR7_GD 0x00002000 /* General Detect Enable */
+#define DR7_GE 0x00000200 /* Global exact */
+#define DR7_LE 0x00000100 /* Local exact */
+
+extern kdb_machreg_t kdba_getdr6(void);
+extern void kdba_putdr6(kdb_machreg_t);
+
+extern kdb_machreg_t kdba_getdr7(void);
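+
+/*
+ * Sketch (hypothetical caller): arm debug register 0 as a 4-byte write
+ * breakpoint with the accessors above; the modified value must still be
+ * written back via the arch-specific putdr routine:
+ *
+ *	kdb_machreg_t dr7 = kdba_getdr7();
+ *	DR7_RW0SET(dr7, 1);	(1 == break on data writes)
+ *	DR7_LEN0SET(dr7, 3);	(length encoding 3 == 4 bytes)
+ *	DR7_G0SET(dr7);		(global enable for dr0)
+ */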
+
+struct kdba_running_process {
+ long sp; /* KDB may be on a different stack */
+ long ip; /* eip when esp was set */
+};
+
+static inline
+void kdba_unsave_running(struct kdba_running_process *k, struct pt_regs *regs)
+{
+}
+
+struct kdb_activation_record;
+extern void kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu,
+ struct kdb_activation_record *ar);
+
+extern void kdba_wait_for_cpus(void);
+
+
+#ifdef CONFIG_X86_32
+
+#define DR_TYPE_EXECUTE 0x0
+#define DR_TYPE_WRITE 0x1
+#define DR_TYPE_IO 0x2
+#define DR_TYPE_RW 0x3
+
+/*
+ * Platform specific environment entries
+ */
+#define KDB_PLATFORM_ENV "IDMODE=x86", "BYTESPERWORD=4", "IDCOUNT=16"
+
+/*
+ * Support for setjmp/longjmp
+ */
+#define JB_BX 0
+#define JB_SI 1
+#define JB_DI 2
+#define JB_BP 3
+#define JB_SP 4
+#define JB_PC 5
+
+typedef struct __kdb_jmp_buf {
+ unsigned long regs[6]; /* kdba_setjmp assumes fixed offsets here */
+} kdb_jmp_buf;
+
+extern int asmlinkage kdba_setjmp(kdb_jmp_buf *);
+extern void asmlinkage kdba_longjmp(kdb_jmp_buf *, int);
+#define kdba_setjmp kdba_setjmp
+
+extern kdb_jmp_buf *kdbjmpbuf;
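+
+/*
+ * Sketch of the intended fault-recovery pattern (hypothetical; assumes
+ * kdbjmpbuf holds one buffer per cpu):
+ *
+ *	if (kdba_setjmp(&kdbjmpbuf[smp_processor_id()]) == 0) {
+ *		... attempt the fault-prone access ...
+ *	} else {
+ *		... the trap handler used kdba_longjmp() to get here ...
+ *	}
+ */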
+
+/* Arch specific data saved for running processes */
+static inline
+void kdba_save_running(struct kdba_running_process *k, struct pt_regs *regs)
+{
+ k->sp = current_stack_pointer;
+	/* Record the address of the next instruction as the saved ip. */
+	__asm__ __volatile__ ( " lea 1f,%%eax; movl %%eax,%0 ; 1: " : "=r"(k->ip) : : "eax" );
+}
+
+extern void kdb_interrupt(void);
+
+#define KDB_INT_REGISTERS 8
+
+#else /* CONFIG_X86_32 */
+
+extern kdb_machreg_t kdba_getdr(int);
+extern void kdba_putdr(int, kdb_machreg_t);
+
+extern kdb_machreg_t kdb_getcr(int);
+
+/*
+ * Platform specific environment entries
+ */
+#define KDB_PLATFORM_ENV "IDMODE=x86_64", "BYTESPERWORD=8", "IDCOUNT=16"
+
+/*
+ * Register indices for x86_64 setjmp/longjmp.
+ */
+#define JB_RBX 0
+#define JB_RBP 1
+#define JB_R12 2
+#define JB_R13 3
+#define JB_R14 4
+#define JB_R15 5
+#define JB_RSP 6
+#define JB_PC 7
+
+typedef struct __kdb_jmp_buf {
+ unsigned long regs[8]; /* kdba_setjmp assumes fixed offsets here */
+} kdb_jmp_buf;
+
+extern int asmlinkage kdba_setjmp(kdb_jmp_buf *);
+extern void asmlinkage kdba_longjmp(kdb_jmp_buf *, int);
+#define kdba_setjmp kdba_setjmp
+
+extern kdb_jmp_buf *kdbjmpbuf;
+
+/* Arch specific data saved for running processes */
+register unsigned long current_stack_pointer asm("rsp") __used;
+
+static inline
+void kdba_save_running(struct kdba_running_process *k, struct pt_regs *regs)
+{
+ k->sp = current_stack_pointer;
+	/* Record the current instruction pointer as the saved ip. */
+	__asm__ __volatile__ ( " lea 0(%%rip),%%rax; movq %%rax,%0 ; " : "=r"(k->ip) : : "rax" );
+}
+
+extern asmlinkage void kdb_interrupt(void);
+
+#define KDB_INT_REGISTERS 16
+
+#endif /* !CONFIG_X86_32 */
+
+#endif /* !_ASM_KDBPRIVATE_H */
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -15,6 +15,8 @@ enum die_val {
DIE_DIE,
DIE_NMIWATCHDOG,
DIE_KERNELDEBUG,
+ DIE_KDEBUG_ENTER,
+ DIE_KDEBUG_LEAVE,
DIE_TRAP,
DIE_GPF,
DIE_CALL,
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -16,6 +16,29 @@
/* this struct defines the way the registers are stored on the
stack during a system call. */
+enum EFLAGS {
+ EF_CF = 0x00000001,
+ EF_PF = 0x00000004,
+ EF_AF = 0x00000010,
+ EF_ZF = 0x00000040,
+ EF_SF = 0x00000080,
+ EF_TF = 0x00000100,
+ EF_IE = 0x00000200,
+ EF_DF = 0x00000400,
+ EF_OF = 0x00000800,
+ EF_IOPL = 0x00003000,
+ EF_IOPL_RING0 = 0x00000000,
+ EF_IOPL_RING1 = 0x00001000,
+ EF_IOPL_RING2 = 0x00002000,
+ EF_NT = 0x00004000, /* nested task */
+ EF_RF = 0x00010000, /* resume */
+ EF_VM = 0x00020000, /* virtual mode */
+ EF_AC = 0x00040000, /* alignment */
+ EF_VIF = 0x00080000, /* virtual interrupt */
+ EF_VIP = 0x00100000, /* virtual interrupt pending */
+ EF_ID = 0x00200000, /* id */
+};
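+
+/*
+ * Illustrative use (assumes a saved struct pt_regs *regs): interrupts
+ * were enabled at trap time iff the IF bit is set:
+ *
+ *	int interrupts_on = !!(regs->flags & EF_IE);
+ */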
+
#ifndef __KERNEL__
struct pt_regs {
--- /dev/null
+++ b/arch/x86/kdb/ChangeLog
@@ -0,0 +1,262 @@
+2008-11-26 Jay Lan <jlan@sgi.com>
+
+ * kdb-v4.4-2.6.28-rc6-x86-1.
+
+2008-11-12 Jay Lan <jlan@sgi.com>
+
+ * kdb-v4.4-2.6.28-rc4-x86-1.
+
+2008-11-04 Jay Lan <jlan@sgi.com>
+
+ * kdb-v4.4-2.6.28-rc3-x86-1.
+
+2008-10-28 Jay Lan <jlan@sgi.com>
+
+ * "Commandeer vector 0xfe for KDB_VECTOR", version 2.
+ Cliff Wickman <cpw@sgi.com>
+ * kdb-v4.4-2.6.28-rc2-x86-2.
+
+2008-10-27 Jay Lan <jlan@sgi.com>
+
+ * Commandeer vector 0xfe for KDB_VECTOR,
+ Cliff Wickman <cpw@sgi.com>
+ * Fix KDB-KDUMP problems on IBM xSeries,
+ Bernhard Walle <bwalle@suse.de>, Jay Lan <jlan@sgi.com>
+ * Fix crash when panic() from task context,
+ Bernhard Walle <bwalle@suse.de>
+ * kdb-v4.4-2.6.28-rc2-x86-1.
+
+2008-10-20 Jay Lan <jlan@sgi.com>
+
+ * kdb-v4.4-2.6.27-x86-1.
+
+2008-09-30 Jay Lan <jlan@sgi.com>
+
+ * kdb-v4.4-2.6.27-rc8-x86-1.
+
+2008-09-22 Jay Lan <jlan@sgi.com>
+
+ * kdb-v4.4-2.6.27-rc7-x86-1.
+
+2008-09-03 Jay Lan <jlan@sgi.com>
+
+ * kdb-v4.4-2.6.27-rc5-x86-1.
+
+2008-08-19 Jay Lan <jlan@sgi.com>
+
+ * kdb-v4.4-2.6.27-rc3-x86-1.
+
+2008-08-14 Jay Lan <jlan@sgi.com>
+
+ * Support 'kdump' command to take a kdump vmcore from KDB,
+ Dan Aloni (da-x@monatomic.org),
+ Jason Xiao (jidong.xiao@gmail.com),
+ Jay Lan (jlan@sgi.com)
+ * kdb-v4.4-2.6.27-rc2-x86-2.
+
+2008-08-06 Jay Lan <jlan@sgi.com>
+
+	* Fix up the NULL pointer dereference issue in ohci_kdb_poll_char,
+ Jason Xiao <jidong.xiao@gmail.com>
+	* Backtraces on x86_64 and i386 were incomplete since 2.6.27-rc2.
+ * kdb-v4.4-2.6.27-rc2-x86-1.
+
+2008-07-18 Jay Lan <jlan@sgi.com>
+
+ * support Hardware Breakpoint (bph/bpha) commands
+ IA64: Greg Banks <gnb@sgi.com>
+ X86: Konstantin Baydarov <kbaidarov@ru.mvista.com>
+ * kdb-v4.4-2.6.26-x86-2.
+
+2008-07-14 Jay Lan <jlan@sgi.com>
+
+ * kdb-v4.4-2.6.26-x86-1.
+
+2008-07-11 Jay Lan <jlan@sgi.com>
+
+ * New commands and some fixups and enhancements,
+ Joe Korty <joe.korty@ccur.com>
+ John Blackwood <john.blackwood@ccur.com>
+ Jim Houston <jim.houston@ccur.com>
+ - Use the non-sleeping copy_from_user_atomic.
+ - Enhance kdb_cmderror diagnostic output.
+ - Expand the KDB 'duplicate command' error message.
+ - Touch NMI watchdog in various KDB busy-loops.
+	  - Support IBM HS20 Blade 8843 platform.
+ - Display exactly which cpus needed an NMI to get them into kdb.
+ - Better document that kdb's 'ps A' command can be used to show
+	    _all_ processes and threads.
+ - Suppress KDB boottime INFO messages if quiet boot.
+ - Add a KDB breakpoint to the OOPs path.
+ - Add CONFIG_DISCONTIGMEM support to kdbm_memmap.
+ - Extend the KDB task command to handle CONFIG_NUMA fields.
+ - Extend the KDB vm command to support NUMA stuff.
+ - Create the KDB mempolicy command.
+ - Create a pgdat command for KDB.
+ - Fix a hang on boot on some i386 systems.
+ * kdb-v4.4-2.6.26-rc9-x86-1.
+
+2008-06-30 Jay Lan <jlan@sgi.com>
+
+ * kdb-v4.4-2.6.26-rc8-x86-1.
+
+2008-06-25 Jay Lan <jlan@sgi.com>
+
+ * kdb-v4.4-2.6.26-rc7-x86-1.
+
+2008-06-06 Jay Lan <jlan@sgi.com>
+
+ * kdb-v4.4-2.6.26-rc5-x86-1.
+
+2008-05-30 Jay Lan <jlan@sgi.com>
+
+ * kdb-v4.4-2.6.26-rc4-x86-1.
+
+2008-05-20 Jay Lan <jlan@sgi.com>
+
+ * Merged <asm-x86/kdb_32.h> and <asm-x86/kdb_64.h> to <asm-x86/kdb.h>.
+ * Merged <asm-x86/kdbprivate_32.h> and <asm-x86/kdbprivate_64.h> to
+ <asm-x86/kdbprivate.h>.
+ * kdb-v4.4-2.6.26-rc3-x86-1.
+
+2008-05-15 Jay Lan <jlan@sgi.com>
+
+ * Fixed the i386 backtrace problem where KDB failed to find stacks
+ in the kernel space.
+ * kdb-v4.4-2.6.26-rc1-x86-3.
+
+2008-05-14 Jay Lan <jlan@sgi.com>
+
+	* Fixed a bug where bb_all scanned only odd-numbered entries of kallsyms.
+ * kdb-v4.4-2.6.26-rc1-x86-2.
+
+2008-05-13 Jay Lan <jlan@sgi.com>
+
+ * Known problem: backtrace for i386 is broken since 2.6.25-rc1.
+ * kdb-v4.4-2.6.26-rc1-x86-1.
+
+2008-05-13 Jay Lan <jlan@sgi.com>
+
+ * Known problem: backtrace for i386 is broken since 2.6.25-rc1.
+ * Fixed a couple of x86_64 problems:
+ - "iret_label" are replaced by "irq_return".
+ - bb1 failure on ia32_sysenter_target() & ia32_cstar_target()
+ * kdb-v4.4-2.6.25-x86-2.
+
+2008-04-17 Jay Lan <jlan@sgi.com>
+
+ * Known problem: backtrace for i386 is broken since 2.6.25-rc1.
+ * kdb-v4.4-2.6.25-x86-1.
+
+2008-03-19 Jay Lan <jlan@sgi.com>
+
+	* i386: sysenter_entry was replaced with ia32_sysenter_target since
+ 2.6.25-rc1, Jay Lan <jlan@sgi.com>
+ * Known problem: backtrace for i386 is broken since 2.6.25-rc1.
+ * kdb-v4.4-2.6.25-rc6-x86-2.
+
+2008-03-16 Jay Lan <jlan@sgi.com>
+
+ * Known problem: backtrace for i386 is broken since 2.6.25-rc1.
+ * kdb-v4.4-2.6.25-rc6-x86-1.
+
+2008-03-03 Jay Lan <jlan@sgi.com>
+
+ * Known problem: backtrace for i386 is broken since 2.6.25-rc1.
+ * kdb-v4.4-2.6.25-rc3-x86-1.
+
+2008-02-26 Jay Lan <jlan@sgi.com>
+
+ * remove 'fastcall' from kdb code.
+ * Known problem: backtrace for i386 is broken since 2.6.25-rc1.
+ * kdb-v4.4-2.6.25-rc2-x86-1.
+
+2008-02-19 Jay Lan <jlan@sgi.com>
+
+ * Known problem: backtrace for i386 is broken.
+ * kdb-v4.4-2.6.25-rc1-x86-1.
+
+2008-02-01 Jay Lan <jlan@sgi.com>
+
+ * Backed out USB UHCI support since it caused dropped characters and
+ broke OHCI.
+ * Restored "archkdbcommon" commands for x86. It was lost at the x86
+ merge.
+ * Detecting if the HC was "busy", Aaron Young <ayoung@sgi.com>
+ * kdb-v4.4-2.6.24-x86-2.
+
+2008-01-29 Jay Lan <jlan@sgi.com>
+
+ * kdb-v4.4-2.6.24-x86-1.
+
+2008-01-22 Jay Lan <jlan@sgi.com>
+
+ * USB UHCI kdb support, Konstantin Baydarov <kbaidarov@ru.mvista.com>
+ * kdb-v4.4-2.6.24-rc8-x86-3.
+
+2008-01-18 Jay Lan <jlan@sgi.com>
+
+ * USB EHCI kdb support, Aaron Young <ayoung@sgi.com>
+ * kdb-v4.4-2.6.24-rc8-x86-2.
+
+2008-01-18 Jay Lan <jlan@sgi.com>
+
+ * kdb-v4.4-2.6.24-rc8-x86-1.
+
+2008-01-09 Jay Lan <jlan@sgi.com>
+
+ * Merge arch/x86/kdb/kdba_io_64.c and arch/x86/kdb/kdba_io_32.c to
+ arch/x86/kdb/kdba_io.c
+ * Merge arch/x86/kdb/kdba_id_64.c and arch/x86/kdb/kdba_id_32.c to
+ arch/x86/kdb/kdba_id.c
+ * Merge arch/x86/kdb/pc_keyb_64.h and arch/x86/kdb/pc_keyb_32.h to
+ arch/x86/kdb/pc_keyb.h
+ * kdb-v4.4-2.6.24-rc7-x86-2.
+
+2008-01-07 Jay Lan <jlan@sgi.com>
+
+ * kdb-v4.4-2.6.24-rc7-x86-1.
+
+2007-12-21 Jay Lan <jlan@sgi.com>
+
+ * Renamed kdb/kdba_bt_x86.c to arch/x86/kdba_bt.c.
+ * Find gcc options 'no-optimize-sibling-calls' & 'regparm' from
+ $(KBUILD_CFLAGS) in arch/x86/kdb/Makefile_{32,64}. We used to
+ get them from $(CFLAGS).
+ * Default regparm to 3 on x86_32 if not defined.
+ * kdb v4.4-2.6.24-rc6-x86-1.
+
+2007-12-12 Jay Lan <jlan@sgi.com>
+
+ * Fixed a Makefile_32 error.
+ * kdb v4.4-2.6.24-rc5-x86-1.
+
+2007-12-05 Jay Lan <jlan@sgi.com>
+
+ * Fixed a 'sysctl table check failed' problem.
+ * kdb v4.4-2.6.24-rc4-x86-1.
+
+2007-11-26 Jay Lan <jlan@sgi.com>
+
+ * kdb v4.4-2.6.24-rc3-x86-1.
+
+2007-11-13 Jay Lan <jlan@sgi.com>
+
+ * Back ported "New KDB USB interface" from Aaron Young in
+ v4.4-2.6.23-{i386,x86_64}-2 to 2.6.24 kdb patchset.
+ * Fixed a make problem at arch/x86/Makefile_{32,64}.
+ * kdb v4.4-2.6.24-rc2-x86-2.
+
+2007-11-12 Jay Lan <jlan@sgi.com>
+
+ * kdb v4.4-2.6.24-rc2-x86-1.
+
+2007-11-09 Jay Lan <jlan@sgi.com>
+
+ * Rebase to 2.6.24-rc1 kernel
+ * - merged kdb-v4.4-2.6.23-i386-1 and kdb-v4.4-2.6.23-x86_64-1
+ * into kdb-v4.4-2.6.24-rc1-x86-1
+ * - Fields "done", "sglist_len", and "pid" are removed from
+ * struct scsi_cmnd. Thus, these fields are no longer displayed
+ * on "sc" command.
+ * kdb v4.4-2.6.24-rc1-x86-1.
--- /dev/null
+++ b/arch/x86/kdb/ChangeLog_32
@@ -0,0 +1,865 @@
+2007-11-08 Jay Lan <jlan@sgi.com>
+
+ * New KDB USB interface, Aaron Young <ayoung@sgi.com>
+	* 1. This patch allows KDB to work with any Host Controller driver
+	*    and call the correct HC driver poll routine (as long as the
+	*    HC driver provides a .kdb_poll_char routine via its
+ * associated hc_driver struct).
+ * 2. Hotplugged keyboards are now recognized by KDB.
+ * 3. Currently KDB can only make use of 1 USB type keyboard.
+ * New code can handle up to 8 attached keyboards - input is
+ * multiplexed from all of them while in kdb.
+ * kdb v4.4-2.6.23-common-2.
+
+2007-10-24 Jay Lan <jlan@sgi.com>
+
+ * kdb v4.4-2.6.23-i386-1.
+
+2007-09-26 Jay Lan <jlan@sgi.com>
+
+ * kdb v4.4-2.6.23-rc8-i386-1.
+
+2007-09-21 Jay Lan <jlan@sgi.com>
+
+ * kdb v4.4-2.6.23-rc7-i386-1.
+
+2007-09-12 Jay Lan <jlan@sgi.com>
+
+ * kdb v4.4-2.6.23-rc6-i386-1.
+
+2007-09-06 Jay Lan <jlan@sgi.com>
+
+ * kdb v4.4-2.6.23-rc5-i386-1.
+
+2007-08-30 Keith Owens <kaos@sgi.com>
+
+ * New i386/x86_64 backtrace requires that kdb_save_running() does not
+ exit until after kdb_main_loop() has completed.
+ * kdb v4.4-2.6.23-rc4-i386-2.
+
+2007-08-30 Jay Lan <jlan@sgi.com>
+
+ * kdb v4.4-2.6.23-rc4-i386-1.
+
+2007-08-24 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.23-rc3-i386-1.
+
+2007-08-07 Jay Lan <jlan@sgi.com>
+
+ * kdb v4.4-2.6.23-rc2-i386-1.
+
+2007-07-31 Keith Owens <kaos@sgi.com>
+
+ * Delete obsolete kdba_bt.c.
+ * kdb v4.4-2.6.23-rc1-i386-2.
+
+2007-07-30 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.23-rc1-i386-1.
+
+2007-07-26 Keith Owens <kaos@sgi.com>
+
+ * New x86 backtrace code.
+ * kdb v4.4-2.6.22-i386-2.
+
+2007-07-09 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.22-i386-1.
+
+2007-07-02 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.22-rc7-i386-1.
+
+2007-06-20 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.22-rc5-i386-1.
+
+2007-06-08 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.22-rc4-i386-1.
+
+2007-05-28 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.22-rc3-i386-1.
+
+2007-05-22 Keith Owens <kaos@sgi.com>
+
+ * Register KDBENTER_VECTOR early on the boot cpu.
+ * kdb v4.4-2.6.22-rc2-i386-2.
+
+2007-05-22 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.22-rc2-i386-1.
+
+2007-05-22 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.22-rc1-i386-1.
+
+2007-05-17 Keith Owens <kaos@sgi.com>
+
+ * Update dumpregs comments for rdmsr and wrmsr commands.
+ Bernardo Innocenti.
+ * kdb v4.4-2.6.21-i386-3.
+
+2007-05-15 Keith Owens <kaos@sgi.com>
+
+ * Change kdba_late_init to kdba_arch_init so KDB_ENTER() can be used
+ earlier.
+ * kdb v4.4-2.6.21-i386-2.
+
+2007-04-29 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.21-i386-1.
+
+2007-04-16 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.21-rc7-i386-1.
+
+2007-04-10 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.21-rc6-i386-1.
+
+2007-04-02 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.21-rc5-i386-1.
+
+2007-03-19 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.21-rc4-i386-1.
+
+2007-03-14 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.21-rc3-i386-1.
+
+2007-03-14 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.21-rc2-i386-1.
+
+2007-03-01 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.21-rc1-i386-1.
+
+2007-03-01 Keith Owens <kaos@sgi.com>
+
+ * Remove sparse warnings.
+ * kdb v4.4-2.6.20-i386-3.
+
+2007-02-16 Keith Owens <kaos@sgi.com>
+
+ * Initialise variable bits of struct disassemble_info each time.
+ * kdb v4.4-2.6.20-i386-2.
+
+2007-02-06 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.20-i386-1.
+
+2007-02-01 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.20-rc7-i386-1.
+
+2007-01-08 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.20-rc4-i386-1.
+
+2007-01-02 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.20-rc3-i386-1.
+
+2006-12-20 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.20-rc1-i386-1.
+
+2006-11-30 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.19-i386-1.
+
+2006-11-27 Keith Owens <kaos@sgi.com>
+
+ * Only use VT keyboard if the command line allows it and ACPI indicates
+ that there is an i8042.
+ * kdb v4.4-2.6.19-rc6-i386-2.
+
+2006-11-20 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.19-rc6-i386-1.
+
+2006-11-09 Keith Owens <kaos@sgi.com>
+
+ * Change kdb() to fastcall.
+ * Add unwind info to kdb_call(). Steve Lord.
+ * Only use VT console if the command line allows it.
+ * kdb v4.4-2.6.19-rc5-i386-2.
+
+2006-11-08 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.19-rc5-i386-1.
+
+2006-11-01 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.19-rc4-i386-1.
+
+2006-10-24 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.19-rc3-i386-1.
+
+2006-10-24 Keith Owens <kaos@sgi.com>
+
+ * Remove redundant regs and envp parameters.
+ * kdb v4.4-2.6.19-rc2-i386-2.
+
+2006-10-18 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.19-rc2-i386-1.
+
+2006-10-11 Keith Owens <kaos@sgi.com>
+
+ * Move kdbm_x86.c from the i386 to the common KDB patch.
+ * Make the KDBENTER_VECTOR an interrupt gate instead of a trap gate, it
+ simplifies the code and disables interrupts on KDBENTER().
+ * Exclude the KDBENTER_VECTOR from irq assignment.
+ * kdb v4.4-2.6.19-rc1-i386-2.
+
+2006-10-09 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.19-rc1-i386-1.
+
+2006-10-06 Keith Owens <kaos@sgi.com>
+
+ * Remove #include <linux/config.h>
+ * kdb v4.4-2.6.18-i386-2.
+
+2006-09-20 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.18-i386-1.
+
+2006-09-15 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.18-rc7-i386-1.
+
+2006-08-30 Keith Owens <kaos@sgi.com>
+
+ * Add warning for problems when following alternate stacks.
+ * kdb v4.4-2.6.18-rc5-i386-3.
+
+2006-08-29 Keith Owens <kaos@sgi.com>
+
+ * Rewrite all backtrace code.
+ * kdb v4.4-2.6.18-rc5-i386-2.
+
+2006-08-28 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.18-rc5-i386-1.
+
+2006-08-08 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.18-rc4-i386-1.
+
+2006-08-04 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.18-rc3-i386-1.
+
+2006-07-18 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.18-rc2-i386-1.
+
+2006-07-12 Keith Owens <kaos@sgi.com>
+
+ * Remove dead KDB_REASON codes.
+ * sparse cleanups.
+ * kdb v4.4-2.6.18-rc1-i386-2.
+
+2006-07-07 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.18-rc1-i386-1.
+
+2006-07-04 Keith Owens <kaos@sgi.com>
+
+ * Make KDB rendezvous on i386 a two stage approach.
+ * Clean up generation of KDB interrupt code.
+ * Move smp_kdb_stop() and smp_kdb_interrupt() to kdbasupport.c.
+ * Move setting of interrupt traps to kdbasupport.c.
+ * Remove KDB hooks from arch/i386/kernel smp.c, smpboot.c, i8259.c,
+ io_apic.c.
+ * Add KDB_REASON_CPU_UP support.
+ * Move per cpu setup to kdba_cpu_up().
+ * Rework support for 4K stacks to make backtrace more accurate.
+ * Add BTSP option to get the full backtrace, including kdb routines.
+ * Delete kdba_enable_mce, architectures now do their own setup.
+ * Delete kdba_enable_lbr, kdba_disable_lbr, kdba_print_lbr,
+ page_fault_mca. Only ever implemented on x86, difficult to maintain
+ and rarely used in the field.
+ * Replace #ifdef KDB_HAVE_LONGJMP with #ifdef kdba_setjmp.
+ * kdb v4.4-2.6.17-i386-2.
+
+2006-06-19 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.17-i386-1.
+
+2006-05-25 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.17-rc5-i386-1.
+
+2006-05-15 Keith Owens <kaos@sgi.com>
+
+ * Refresh bfd related files from binutils 2.16.91.0.2.
+ * kdb v4.4-2.6.17-rc4-i386-2.
+
+2006-05-12 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.17-rc4-i386-1.
+
+2006-04-28 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.17-rc3-i386-1.
+
+2006-04-22 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.17-rc2-i386-1.
+
+2006-04-11 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.17-rc1-i386-1.
+
+2006-03-30 Keith Owens <kaos@sgi.com>
+
+ * Change CONFIG_LKCD to CONFIG_LKCD_DUMP.
+ * kdb v4.4-2.6.16-i386-3.
+
+2006-03-24 Keith Owens <kaos@sgi.com>
+
+ * Define a dummy kdba_wait_for_cpus().
+ * kdb v4.4-2.6.16-i386-2.
+
+2006-03-21 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.16-i386-1.
+
+2006-03-14 Nathan Scott <nathans@sgi.com>
+
+ * kdb v4.4-2.6.16-rc6-i386-1.
+
+2006-02-28 Nathan Scott <nathans@sgi.com>
+
+ * kdb v4.4-2.6.16-rc5-i386-1.
+
+2006-02-20 Nathan Scott <nathans@sgi.com>
+
+ * kdb v4.4-2.6.16-rc4-i386-1.
+
+2006-02-06 Keith Owens <kaos@sgi.com>
+
+ * Change CONFIG_CRASH_DUMP to CONFIG_LKCD.
+ * kdb v4.4-2.6.16-rc2-i386-2.
+
+2006-02-06 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.16-rc2-i386-1.
+
+2006-01-18 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.16-rc1-i386-1.
+
+2006-01-08 Keith Owens <kaos@sgi.com>
+
+ * Add DIE_KDEBUG_ENTER and DIE_KDEBUG_LEAVE to notify_die.
+ * kdb v4.4-2.6.15-i386-2.
+
+2006-01-04 Keith Owens <kaos@sgi.com>
+
+ * Remove some inlines and the last vestige of CONFIG_NUMA_REPLICATE.
+ * Read the keyboard acknowledgment after sending a character. SuSE
+ Bugzilla 60240.
+ * kdb v4.4-2.6.15-i386-1.
+
+2005-12-25 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.15-rc7-i386-1.
+
+2005-12-20 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.15-rc6-i386-1.
+
+2005-12-05 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.15-rc5-i386-1.
+
+2005-12-02 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.15-rc4-i386-1.
+
+2005-11-30 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.15-rc3-i386-1.
+
+2005-11-21 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.15-rc2-i386-1.
+
+2005-11-15 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.15-rc1-i386-1.
+
+2005-10-28 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.14-i386-1.
+
+2005-10-21 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.14-rc5-i386-1.
+
+2005-10-11 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.14-rc4-i386-1.
+
+2005-10-04 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.14-rc3-i386-1.
+
+2005-09-21 Keith Owens <kaos@sgi.com>
+
+ * Support kdb_current_task in register display and modify commands.
+ * kdb v4.4-2.6.14-rc2-i386-1.
+
+2005-09-20 Keith Owens <kaos@sgi.com>
+
+ * Remove use of __STDC_VERSION__ in ansidecl.h.
+ * kdb v4.4-2.6.14-rc1-i386-1.
+
+2005-08-29 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.13-i386-1.
+
+2005-08-24 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.13-rc7-i386-1.
+
+2005-08-08 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.13-rc6-i386-1.
+
+2005-08-02 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.13-rc5-i386-1.
+
+2005-07-30 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.13-rc4-i386-1.
+
+2005-07-22 Keith Owens <kaos@sgi.com>
+
+ * Compile fix for kprobes.
+ * kdb v4.4-2.6.13-rc3-i386-2.
+
+2005-07-19 Keith Owens <kaos@sgi.com>
+
+ * Add support for USB keyboard (OHCI only). Aaron Young, SGI.
+ * kdb v4.4-2.6.13-rc3-i386-1.
+
+2005-07-08 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.13-rc2-i386-1.
+
+2005-07-01 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.13-rc1-i386-1.
+
+2005-06-19 Keith Owens <kaos@sgi.com>
+
+ * gcc 4 compile fix, remove extern kdb_hardbreaks. Steve Lord.
+ * kdb v4.4-2.6.12-i386-2.
+
+2005-06-18 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.12-i386-1.
+
+2005-06-08 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.12-rc6-i386-1.
+
+2005-05-25 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.12-rc5-i386-1.
+
+2005-05-08 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.12-rc4-i386-1.
+
+2005-04-21 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.12-rc3-i386-1.
+
+2005-04-06 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.12-rc2-i386-1.
+
+2005-03-29 Keith Owens <kaos@sgi.com>
+
+ * Replace __copy_to_user with __copy_to_user_inatomic.
+ * kdb v4.4-2.6.12-rc1-i386-1.
+
+2005-03-08 Keith Owens <kaos@sgi.com>
+
+ * Coexistence patches for lkcd.
+ * kdb v4.4-2.6.11-i386-2.
+
+2005-03-03 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.11-i386-1.
+
+2005-02-14 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.11-rc4-i386-1.
+
+2005-02-08 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.11-rc3-bk4-i386-1.
+
+2005-02-03 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.11-rc3-i386-1.
+
+2005-01-27 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.11-rc2-i386-1.
+
+2005-01-12 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.11-rc1-i386-1.
+
+2004-12-25 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.10-i386-1.
+
+2004-12-07 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.10-rc3-i386-1.
+
+2004-11-23 Keith Owens <kaos@sgi.com>
+
+ * Coexist with asmlinkage/fastcall changes.
+ * kdb v4.4-2.6.10-rc2-i386-1.
+
+2004-10-29 Keith Owens <kaos@sgi.com>
+
+	* Handle changed definitions for hard and soft irq context.
+ * Make stack switch in kdb backtrace look more like the oops output.
+ * kdb v4.4-2.6.10-rc1-i386-1.
+
+2004-10-19 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.9-i386-1.
+
+2004-10-12 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.9-rc4-i386-1.
+
+2004-10-01 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.9-rc3-i386-1.
+
+2004-09-30 Keith Owens <kaos@sgi.com>
+
+ * Add stackdepth command.
+ * Handle backtrace with separate soft and hard irq stacks
+ (CONFIG_4KSTACKS).
+ * Work around RESTORE_ALL macro, which can only be used once.
+ * Export kdba_dumpregs. Bryan Cardillo, UPenn.
+ * kdb v4.4-2.6.9-rc2-i386-2.
+
+2004-09-14 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.9-rc2-i386-1.
+
+2004-08-27 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.9-rc1-i386-1.
+
+2004-08-14 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.8-i386-1.
+
+2004-08-12 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.8-rc4-i386-1.
+
+2004-08-04 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.8-rc3-i386-1.
+
+2004-07-18 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.8-rc2-i386-1.
+
+2004-07-12 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.8-rc1-i386-1.
+
+2004-06-16 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.7-i386-1.
+
+2004-06-10 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.7-rc3-i386-1.
+
+2004-06-09 Keith Owens <kaos@sgi.com>
+
+	* Namespace clean up.  Mark code/variables as static when they are
+	  only used in one file; delete dead code/variables.
+ * kdb v4.4-2.6.7-rc2-i386-3.
+
+2004-06-08 Keith Owens <kaos@sgi.com>
+
+ * Whitespace clean up, no code changes.
+ * kdb v4.4-2.6.7-rc2-i386-2.
+
+2004-06-07 Keith Owens <kaos@sgi.com>
+
+ * Force KALLSYMS and KALLSYMS_ALL for CONFIG_KDB.
+ * kdb v4.4-2.6.7-rc2-i386-1.
+
+2004-06-06 Keith Owens <kaos@sgi.com>
+
+ * Correct Kconfig help text.
+ * Coexist with CONFIG_REGPARM.
+ * Add standard archkdb commands.
+ * Move kdb_{get,put}userarea_size definitions to linux/kdb.h.
+ * kdb v4.4-2.6.6-i386-2.
+
+2004-05-23 Keith Owens <kaos@sgi.com>
+
+ * Move bfd.h and ansidecl.h from arch/$(ARCH)/kdb to include/asm-$(ARCH).
+ * Update copyright notices.
+ * kdb v4.4-2.6.6-i386-1.
+
+2004-05-10 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.3-2.6.6-i386-1.
+
+2004-05-06 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.3-2.6.6-rc3-i386-1.
+
+2004-05-06 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.3-2.6.6-rc2-i386-1.
+
+2004-04-30 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.3-2.6.6-rc1-i386-1.
+
+2004-04-05 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.3-2.6-5-i386-1.
+
+2004-02-29 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.3-2.6-4-rc1-i386-1.
+
+2004-02-18 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.3-2.6-3-i386-1.
+
+2004-02-17 Keith Owens <kaos@sgi.com>
+
+ * Pick up changes from Jim Houston for 2.6.
+ * Sync with kdb v4.3-2.4.25-rc1-i386-1.
+ * Adjust for LDT changes in i386 mainline.
+ * Convert longjmp buffers from static to dynamic allocation, for large
+ cpu counts.
+ * Do not use USB keyboard if it has not been probed.
+ * Do not print section data, 2.6 kallsyms does not support sections :(.
+ * kdb v4.3-2.6-3-rc3-i386-1.
+
+2003-08-29 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.3-2.4.22-i386-1.
+
+2003-08-05 Keith Owens <kaos@sgi.com>
+
+ * Remove duplicate setting of trap for machine_check.
+ * Only reset keyboard when CONFIG_VT_CONSOLE is defined.
+
+2003-07-27 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.3-2.4.22-pre8-i386-5.
+
+2003-07-20 Keith Owens <kaos@sgi.com>
+
+ * Remove compile warning on x86 commands.
+ * kdb v4.3-2.4.21-i386-5.
+
+2003-07-08 Keith Owens <kaos@sgi.com>
+
+ * Add new x86 commands - rdv, gdt, idt, ldt, ldtp, ptex.
+ Vamsi Krishna S., IBM.
+ * kdb v4.3-2.4.21-i386-4.
+
+2003-07-01 Keith Owens <kaos@sgi.com>
+
+ * Convert kdba_find_return() to two passes to reduce false positives.
+ * Correct jmp disp8 offset calculation for out of line lock code.
+ * Use NMI for kdb IPI in clustered APIC mode. Sachin Sant, IBM.
+ * kdb v4.3-2.4.21-i386-3.
+
+2003-06-23 Keith Owens <kaos@sgi.com>
+
+ * Sync with XFS 2.4.21 tree.
+ * kdb v4.3-2.4.21-i386-2.
+
+2003-06-20 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.3-2.4.21-i386-1.
+
+2003-06-20 Keith Owens <kaos@sgi.com>
+
+ * Add CONFIG_KDB_CONTINUE_CATASTROPHIC.
+ * Correct KDB_ENTER() definition.
+ * kdb v4.3-2.4.20-i386-1.
+
+2003-05-02 Keith Owens <kaos@sgi.com>
+
+ * Add kdba_fp_value().
+ * Limit backtrace size to catch loops.
+ * Add read/write access to user pages. Vamsi Krishna S., IBM
+ * Clean up USB keyboard support. Steven Dake.
+ * kdb v4.2-2.4.20-i386-1.
+
+2003-04-04 Keith Owens <kaos@sgi.com>
+
+ * Workarounds for scheduler bugs.
+ * kdb v4.1-2.4.20-i386-1.
+
+2003-03-16 Keith Owens <kaos@sgi.com>
+
+ * Each cpu saves its state as it enters kdb or before it enters code
+ which cannot call kdb, converting kdb from a pull to a push model.
+ * Clean up kdb interaction with CONFIG_SERIAL_CONSOLE.
+ * Removal of special cases for i386 backtrace from common code
+ simplifies the architecture code.
+ * Add command to dump i386 struct pt_regs.
+ * kdb v4.0-2.4.20-i386-1.
+
+2003-02-03 Keith Owens <kaos@sgi.com>
+
+ * Register kdb commands early.
+ * Handle KDB_ENTER() when kdb=off.
+ * Optimize __kdba_getarea_size when width is a constant.
+ * Decode oops via kallsyms if it is available.
+ * Update copyright notices to 2003.
+ * Handle call *disp32(%reg) in backtrace.
+ * Correct keyboard freeze. Ashish Kalra.
+ * Add command history and editing. Sonic Zhang.
+ * kdb_toggleled is conditional on KDB_BLINK_LED. Bernhard Fischer.
+ * Allow tab on serial line for symbol completion.
+ * Ignore KDB_ENTER() when kdb is already running.
+ * kdb v3.0-2.4.20-i386-1.
+
+2002-11-29 Keith Owens <kaos@sgi.com>
+
+ * Upgrade to 2.4.20.
+ * kdb v2.5-2.4.20-i386-1.
+
+2002-11-14 Keith Owens <kaos@sgi.com>
+
+ * Upgrade to 2.4.20-rc1.
+ * kdb v2.5-2.4.20-rc1-i386-1.
+
+2002-11-14 Keith Owens <kaos@sgi.com>
+
+ * General clean up of handling for breakpoints and single stepping over
+ software breakpoints.
+ * Accept ff 1x as well as ff dx for call *(%reg) in backtrace.
+ * kdb v2.5-2.4.19-i386-1.
+
+2002-11-01 Keith Owens <kaos@sgi.com>
+
+ * Prevent SMP IRQ overwriting KDB_ENTER().
+ * kdb v2.4-2.4.19-i386-2.
+
+2002-10-31 Keith Owens <kaos@sgi.com>
+
+ * Avoid KDB_VECTOR conflict with DUMP_VECTOR.
+ * Remove kdb_eframe_t.
+ * Sanity check if we have pt_regs.
+ * Remove kdba_getcurrentframe().
+ * Reinstate missing nmi_watchdog/kdb hook.
+ * kdb v2.4-2.4.19-i386-1.
+
+2002-10-17 Keith Owens <kaos@sgi.com>
+
+ * Correct compile with CONFIG_VT_CONSOLE=n.
+ * kdb v2.3-2.4.19-i386-5.
+
+2002-10-04 Keith Owens <kaos@sgi.com>
+
+ * Add USB keyboard option.
+ * Minimize differences between patches for 2.4 and 2.5 kernels.
+ * kdb v2.3-2.4.19-i386-4.
+
+2002-08-10 Keith Owens <kaos@sgi.com>
+
+ * Replace kdb_port with kdb_serial to support memory mapped I/O.
+ Note: This needs kdb v2.3-2.4.19-common-2 or later.
+ * kdb v2.3-2.4.19-i386-3.
+
+2002-08-09 Keith Owens <kaos@sgi.com>
+
+ * Use -fno-optimize-sibling-calls for kdb if gcc supports it.
+ * .text.lock does not consume an activation frame.
+ * kdb v2.3-2.4.19-i386-2.
+
+2002-08-07 Keith Owens <kaos@sgi.com>
+
+ * Upgrade to 2.4.19.
+ * Remove individual SGI copyrights, the general SGI copyright applies.
+ * New .text.lock name. Hugh Dickins.
+ * Set KERNEL_CS in kdba_getcurrentframe. Hugh Dickins.
+ * Clean up disassembly layout. Hugh Dickins, Keith Owens.
+ * Replace hard coded stack size with THREAD_SIZE. Hugh Dickins.
+ * Better stack layout on bt with no frame pointers. Hugh Dickins.
+ * Make i386 IO breakpoints (bpha <address> IO) work again.
+ Martin Wilck, Keith Owens.
+ * Remove fixed KDB_MAX_COMMANDS size.
+ * Add set_fs() around __copy_to_user on kernel addresses.
+ Randolph Chung.
+ * Position i386 for CONFIG_NUMA_REPLICATE.
+ * kdb v2.3-2.4.19-i386-1.
+
+2002-07-09 Keith Owens <kaos@sgi.com>
+
+ * Upgrade to 2.4.19-rc1.
+
+2002-06-14 Keith Owens <kaos@sgi.com>
+
+ * Upgrade to 2.4.19-pre10.
+ * kdb v2.1-2.4.19-pre10-i386-1.
+
+2002-04-09 Keith Owens <kaos@sgi.com>
+
+ * Upgrade to 2.4.19-pre6.
+ * kdb v2.1-2.4.19-pre6-i386-1.
+
+2002-02-26 Keith Owens <kaos@sgi.com>
+
+ * Upgrade to 2.4.18.
+ * kdb v2.1-2.4.18-i386-1.
+
+2002-01-18 Keith Owens <kaos@sgi.com>
+
+ * Use new kdb_get/put functions.
+ * Define kdba_{get,put}area_size functions for i386.
+ * Remove over-engineered dblist callback functions.
+ * Correctly handle failing call disp32 in backtrace.
+ * Remove bp_instvalid flag, redundant code.
+ * Remove dead code.
+ * kdb v2.1-2.4.17-i386-1.
+
+2002-01-04 Keith Owens <kaos@sgi.com>
+
+ * Sync xfs <-> kdb i386 code.
+
+2001-12-22 Keith Owens <kaos@sgi.com>
+
+ * Split kdb for i386 as kdb v2.0-2.4.17-i386-1.
--- /dev/null
+++ b/arch/x86/kdb/ChangeLog_64
@@ -0,0 +1,447 @@
+2007-11-08 Jay Lan <jlan@sgi.com>
+
+ * New KDB USB interface, Aaron Young <ayoung@sgi.com>
+	* 1. This patch allows KDB to work with any Host Controller driver
+	*    and call the correct HC driver poll routine (as long as the
+	*    HC driver provides a .kdb_poll_char routine via its
+ * associated hc_driver struct).
+ * 2. Hotplugged keyboards are now recognized by KDB.
+ * 3. Currently KDB can only make use of 1 USB type keyboard.
+ * New code can handle up to 8 attached keyboards - input is
+ * multiplexed from all of them while in kdb.
+ * kdb v4.4-2.6.23-common-2.
+
+2007-10-24 Jay Lan <jlan@sgi.com>
+
+ * kdb v4.4-2.6.23-x86_64-1.
+
+2007-09-26 Jay Lan <jlan@sgi.com>
+
+ * kdb v4.4-2.6.23-rc8-x86_64-1.
+
+2007-09-21 Jay Lan <jlan@sgi.com>
+
+ * kdb v4.4-2.6.23-rc7-x86_64-1.
+
+2007-09-12 Jay Lan <jlan@sgi.com>
+
+ * kdb v4.4-2.6.23-rc6-x86_64-1.
+
+2007-09-06 Jay Lan <jlan@sgi.com>
+
+ * kdb v4.4-2.6.23-rc5-x86_64-1.
+
+2007-08-30 Keith Owens <kaos@sgi.com>
+
+ * New i386/x86_64 backtrace requires that kdb_save_running() does not
+ exit until after kdb_main_loop() has completed.
+ * kdb v4.4-2.6.23-rc4-x86_64-2.
+
+2007-08-30 Jay Lan <jlan@sgi.com>
+
+ * kdb v4.4-2.6.23-rc4-x86_64-1.
+
+2007-08-24 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.23-rc3-x86_64-1.
+
+2007-08-07 Jay Lan <jlan@sgi.com>
+
+ * v4.4-2.6.23-rc2-x86_64-1.
+
+2007-07-31 Keith Owens <kaos@sgi.com>
+
+ * Delete obsolete kdba_bt.c.
+ * kdb v4.4-2.6.23-rc1-x86_64-2.
+
+2007-07-30 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.23-rc1-x86_64-1.
+
+2007-07-26 Keith Owens <kaos@sgi.com>
+
+ * New x86 backtrace code.
+ * kdb v4.4-2.6.22-x86_64-2.
+
+2007-07-09 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.22-x86_64-1.
+
+2007-07-02 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.22-rc7-x86_64-1.
+
+2007-06-25 Keith Owens <kaos@sgi.com>
+
+ * Hook into DIE_NMIWATCHDOG.
+ * kdb v4.4-2.6.22-rc5-x86_64-2.
+
+2007-06-20 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.22-rc5-x86_64-1.
+
+2007-06-08 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.22-rc4-x86_64-1.
+
+2007-05-28 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.22-rc3-x86_64-1.
+
+2007-05-22 Keith Owens <kaos@sgi.com>
+
+ * Register KDBENTER_VECTOR early on the boot cpu.
+ * kdb v4.4-2.6.22-rc2-x86_64-2.
+
+2007-05-22 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.22-rc2-x86_64-1.
+
+2007-05-22 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.22-rc1-x86_64-1.
+
+2007-05-17 Keith Owens <kaos@sgi.com>
+
+ * Update dumpregs comments for rdmsr and wrmsr commands.
+ Bernardo Innocenti.
+ * kdb v4.4-2.6.21-x86_64-3.
+
+2007-05-15 Keith Owens <kaos@sgi.com>
+
+ * Change kdba_late_init to kdba_arch_init so KDB_ENTER() can be used
+ earlier.
+ * kdb v4.4-2.6.21-x86_64-2.
+
+2007-04-29 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.21-x86_64-1.
+
+2007-04-16 Keith Owens <kaos@sgi.com>
+
+ * Select KALLSYMS and KALLSYMS_ALL when KDB is selected.
+ * kdb v4.4-2.6.21-rc7-x86_64-2.
+
+2007-04-16 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.21-rc7-x86_64-1.
+
+2007-04-10 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.21-rc6-x86_64-1.
+
+2007-04-02 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.21-rc5-x86_64-1.
+
+2007-03-19 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.21-rc4-x86_64-1.
+
+2007-03-14 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.21-rc3-x86_64-1.
+
+2007-03-14 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.21-rc2-x86_64-1.
+
+2007-03-01 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.21-rc1-x86_64-1.
+
+2007-03-01 Keith Owens <kaos@sgi.com>
+
+ * Remove sparse warnings.
+ * kdb v4.4-2.6.20-x86_64-3.
+
+2007-02-16 Keith Owens <kaos@sgi.com>
+
+ * Initialise variable bits of struct disassemble_info each time.
+ * kdb v4.4-2.6.20-x86_64-2.
+
+2007-02-06 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.20-x86_64-1.
+
+2007-02-01 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.20-rc7-x86_64-1.
+
+2007-01-10 Keith Owens <kaos@sgi.com>
+
+ * Correct setjmp for the FRAME_POINTER=y case.
+ * Remove duplicate longjmp code for FRAME_POINTER=n/y.
+ * kdb v4.4-2.6.20-rc4-x86_64-2.
+
+2007-01-08 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.20-rc4-x86_64-1.
+
+2007-01-02 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.20-rc3-x86_64-1.
+
+2006-12-20 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.20-rc1-x86_64-1.
+
+2006-12-07 Keith Owens <kaos@sgi.com>
+
+ * Export kdba_dumpregs.
+ * kdb v4.4-2.6.19-x86_64-2.
+
+2006-11-30 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.19-x86_64-1.
+
+2006-11-27 Keith Owens <kaos@sgi.com>
+
+ * Only use VT keyboard if the command line allows it and ACPI indicates
+ that there is an i8042.
+ * kdb v4.4-2.6.19-rc6-x86_64-2.
+
+2006-11-20 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.19-rc6-x86_64-1.
+
+2006-11-09 Keith Owens <kaos@sgi.com>
+
+ * Only use VT console if the command line allows it.
+ * kdb v4.4-2.6.19-rc5-x86_64-2.
+
+2006-11-08 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.19-rc5-x86_64-1.
+
+2006-11-01 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.19-rc4-x86_64-1.
+
+2006-10-24 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.19-rc3-x86_64-1.
+
+2006-10-24 Keith Owens <kaos@sgi.com>
+
+ * Remove redundant regs and envp parameters.
+ * kdb v4.4-2.6.19-rc2-x86_64-2.
+
+2006-10-18 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.19-rc2-x86_64-1.
+
+2006-10-11 Keith Owens <kaos@sgi.com>
+
+ * Make the KDBENTER_VECTOR an interrupt gate instead of a trap gate, it
+ simplifies the code and disables interrupts on KDB_ENTER().
+ * Exclude the KDBENTER_VECTOR from irq assignment.
+ * Enable KDB_ENTER() again.
+ * kdb v4.4-2.6.19-rc1-x86_64-2.
+
+2006-10-09 Keith Owens <kaos@sgi.com>
+
+ * KDB_ENTER() is getting spurious activations on some x86_64 hardware.
+ Deactivate KDB_ENTER() until it is fixed.
+ * kdb v4.4-2.6.19-rc1-x86_64-1.
+
+2006-10-06 Keith Owens <kaos@sgi.com>
+
+ * Remove #include <linux/config.h>
+ * kdb v4.4-2.6.18-x86_64-2.
+
+2006-09-20 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.18-x86_64-1.
+
+2006-09-15 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.18-rc7-x86_64-1.
+
+2006-08-30 Keith Owens <kaos@sgi.com>
+
+ * Do not print debugstackptr in cpu_pda, it will be deleted soon.
+ * Add KDB_ENTER().
+ * Add warning for problems when following alternate stacks.
+ * kdb v4.4-2.6.18-rc5-x86_64-3.
+
+2006-08-29 Keith Owens <kaos@sgi.com>
+
+ * Rewrite all backtrace code.
+ * Add pt_regs and cpu_pda commands.
+ * Include patch to define orig_ist, to be removed once that patch is in
+ the community tree.
+ * kdb v4.4-2.6.18-rc5-x86_64-2.
+
+2006-08-28 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.18-rc5-x86_64-1.
+
+2006-08-08 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.18-rc4-x86_64-1.
+
+2006-08-04 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.18-rc3-x86_64-1.
+
+2006-07-18 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.18-rc2-x86_64-1.
+
+2006-07-12 Keith Owens <kaos@sgi.com>
+
+ * sparse cleanups
+ * kdb v4.4-2.6.18-rc1-x86_64-2.
+
+2006-07-07 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.18-rc1-x86_64-1.
+
+2006-07-04 Keith Owens <kaos@sgi.com>
+
+ * Make KDB rendezvous on x86_64 a two stage approach.
+ * Move smp_kdb_stop() and smp_kdb_interrupt() to kdbasupport.c.
+ * Move setting of interrupt traps to kdbasupport.c.
+ * Add KDB_REASON_CPU_UP support.
+ * Move per cpu setup to kdba_cpu_up().
+ * Delete kdba_enable_mce, architectures now do their own setup.
+ * Delete kdba_enable_lbr, kdba_disable_lbr, kdba_print_lbr,
+ page_fault_mca. Only ever implemented on x86, difficult to maintain
+ and rarely used in the field.
+ * Replace #ifdef KDB_HAVE_LONGJMP with #ifdef kdba_setjmp.
+ * kdb v4.4-2.6.17-x86_64-2.
+
+2006-06-19 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.17-x86_64-1.
+
+2006-05-31 Keith Owens <kaos@sgi.com>
+
+ * Define arch/x86_64/kdb/kdb_cmds.
+ * kdb v4.4-2.6.17-rc5-x86_64-2.
+
+2006-05-25 Keith Owens <kaos@sgi.com>
+
+ * kdb v4.4-2.6.17-rc5-x86_64-1.
+
+2006-05-15 Keith Owens <kaos@sgi.com>
+
+ * Refresh bfd related files from binutils 2.16.91.0.2.
+ * kdb v4.4-2.6.17-rc4-x86_64-2.
+
+2006-05-12 Keith Owens <kaos@sgi.com>
+
+	* kdb v4.4-2.6.17-rc4-x86_64-1.
+
+2006-04-22 Keith Owens <kaos@sgi.com>
+
+	* kdb v4.4-2.6.17-rc2-x86_64-1.
+
+2006-04-13 Keith Owens <kaos@sgi.com>
+
+ * Remove trailing white space.
+	* kdb v4.4-2.6.17-rc1-x86_64-1.
+
+2006-03-25 Jack F. Vogel <jfv@bluesong.net>
+ * Sync with Keith's changes for 2.6.16
+ * code from Andi Kleen to support above
+
+2005-09-30 Jack F. Vogel <jfv@bluesong.net>
+ * Port to 2.6.14-rc2
+ * sync with a couple changes from Keith
+ * Add backtrace code from Jim Houston
+ (thanks Jim)
+
+2005-08-31 Jack F. Vogel <jfv@bluesong.net>
+ * Change to linker script for kexec
+ thanks to Steven Dake <sdake@mvista.com>
+
+2005-08-30 Jack F. Vogel <jfv@bluesong.net>
+ * Notify struct should not be devinit
+ thanks IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
+
+2005-08-25 Jack F. Vogel <jfv@bluesong.net>
+ * Update to 2.6.11
+ * Fix to synchronize with the notify changes
+ thanks to Jim Houston.
+
+2004-09-30 Keith Owens <kaos@sgi.com>
+ * Port to 2.6.9-rc2
+	* Fix line editing characters. Jim Houston, Comcast.
+ * kdb v4.4-2.6.9-rc2-x86-64-1.
+
+2004-08-15 Jack F. Vogel <jfv@bluesong.net>
+ * Port to 2.6.8
+ * tighten up the code, using the built-in
+ die_chain notify interface, thanks to
+ Andi Kleen for pointing this out.
+
+2004-05-15 Jack F. Vogel <jfv@bluesong.net>
+ * port to 2.6.6 for x86_64
+
+2003-12-15 Cliff Neighbors <cliff@fabric7.com>
+ * initial port from i386 to x86_64
+
+2002-08-10 Keith Owens <kaos@sgi.com>
+
+ * Replace kdb_port with kdb_serial to support memory mapped I/O.
+ Note: This needs kdb v2.3-2.4.19-common-2 or later.
+ * kdb v2.3-2.4.19-i386-3.
+
+2002-08-09 Keith Owens <kaos@sgi.com>
+
+ * Use -fno-optimize-sibling-calls for kdb if gcc supports it.
+ * .text.lock does not consume an activation frame.
+ * kdb v2.3-2.4.19-i386-2.
+
+2002-08-07 Keith Owens <kaos@sgi.com>
+
+ * Upgrade to 2.4.19.
+ * Remove individual SGI copyrights, the general SGI copyright applies.
+ * New .text.lock name. Hugh Dickins.
+ * Set KERNEL_CS in kdba_getcurrentframe. Hugh Dickins.
+ * Clean up disassembly layout. Hugh Dickins, Keith Owens.
+ * Replace hard coded stack size with THREAD_SIZE. Hugh Dickins.
+ * Better stack layout on bt with no frame pointers. Hugh Dickins.
+ * Make i386 IO breakpoints (bpha <address> IO) work again.
+ Martin Wilck, Keith Owens.
+ * Remove fixed KDB_MAX_COMMANDS size.
+ * Add set_fs() around __copy_to_user on kernel addresses.
+ Randolph Chung.
+ * Position i386 for CONFIG_NUMA_REPLICATE.
+ * kdb v2.3-2.4.19-i386-1.
+
+2002-07-09 Keith Owens <kaos@sgi.com>
+
+ * Upgrade to 2.4.19-rc1.
+
+2002-06-14 Keith Owens <kaos@sgi.com>
+
+ * Upgrade to 2.4.19-pre10.
+ * kdb v2.1-2.4.19-pre10-i386-1.
+
+2002-04-09 Keith Owens <kaos@sgi.com>
+
+ * Upgrade to 2.4.19-pre6.
+ * kdb v2.1-2.4.19-pre6-i386-1.
+
+2002-02-26 Keith Owens <kaos@sgi.com>
+
+ * Upgrade to 2.4.18.
+ * kdb v2.1-2.4.18-i386-1.
+
+2002-01-18 Keith Owens <kaos@sgi.com>
+
+ * Use new kdb_get/put functions.
+ * Define kdba_{get,put}area_size functions for i386.
+ * Remove over-engineered dblist callback functions.
+ * Correctly handle failing call disp32 in backtrace.
+ * Remove bp_instvalid flag, redundant code.
+ * Remove dead code.
+ * kdb v2.1-2.4.17-i386-1.
+
+2002-01-04 Keith Owens <kaos@sgi.com>
+
+ * Sync xfs <-> kdb i386 code.
+
--- /dev/null
+++ b/arch/x86/kdb/Makefile
@@ -0,0 +1,29 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
+#
+
+obj-$(CONFIG_KDB) += kdba_bp.o x86-dis.o kdba_bt.o \
+ kdba_io.o kdba_id.o kdba_support.o
+
+ifneq (,$(findstring -fno-optimize-sibling-calls,$(KBUILD_CFLAGS)))
+ CFLAGS_kdba_bt.o += -DNO_SIBLINGS
+endif
+
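+# REGPARM is the number of arguments passed in registers: taken from any
+# -mregparm=N in KBUILD_CFLAGS, otherwise assumed to be 3 on i386 (the
+# -mregparm kernel convention) and 6 on x86_64 (the standard ABI).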
+REGPARM := $(subst -mregparm=,,$(filter -mregparm=%,$(KBUILD_CFLAGS)))
+ifeq (,$(REGPARM))
+ifeq ($(CONFIG_X86_32),y)
+ REGPARM := 3
+else
+ REGPARM := 6
+endif
+endif
+
+CFLAGS_kdba_bt.o += -DREGPARM=$(REGPARM) -DCCVERSION="$(CCVERSION)"
+
+override CFLAGS := $(CFLAGS:%-pg=% )
+
+CFLAGS_kdba_io.o += -I $(TOPDIR)/arch/$(SRCARCH)/kdb
--- /dev/null
+++ b/arch/x86/kdb/kdb_cmds_32
@@ -0,0 +1,17 @@
+# Standard architecture specific commands for kdb.
+# These commands are appended to those in kdb/kdb_cmds, see that file for
+# restrictions.
+
+# Standard debugging information for first level support, invoked from archkdb*
+# commands that are defined in kdb/kdb_cmds.
+
+defcmd archkdbcommon "" "Common arch debugging"
+ set LINES 2000000
+ set BTAPROMPT 0
+ -summary
+ -id %eip-24
+ -cpu
+ -ps
+ -dmesg 600
+ -bt
+endefcmd
--- /dev/null
+++ b/arch/x86/kdb/kdb_cmds_64
@@ -0,0 +1,18 @@
+# Standard architecture specific commands for kdb.
+# These commands are appended to those in kdb/kdb_cmds, see that file for
+# restrictions.
+
+# Standard debugging information for first level support, invoked from archkdb*
+# commands that are defined in kdb/kdb_cmds.
+
+defcmd archkdbcommon "" "Common arch debugging"
+ set LINES 2000000
+ set BTAPROMPT 0
+ -summary
+ -id %rip-24
+ -cpu
+ -ps
+ -dmesg 600
+ -bt
+ -cpu_pda *
+endefcmd
--- /dev/null
+++ b/arch/x86/kdb/kdba_bp.c
@@ -0,0 +1,914 @@
+/*
+ * Kernel Debugger Architecture Dependent Breakpoint Handling
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/ptrace.h>
+#include <linux/kdb.h>
+#include <linux/kdbprivate.h>
+
+
+static char *kdba_rwtypes[] = { "Instruction(Register)", "Data Write",
+ "I/O", "Data Access"};
+
+/*
+ * Table describing processor architecture hardware
+ * breakpoint registers for every CPU.
+ */
+
+static kdbhard_bp_t kdb_hardbreaks[NR_CPUS][KDB_MAXHARDBPT];
+
+/*
+ * kdba_db_trap
+ *
+ * Perform breakpoint processing upon entry to the
+ * processor debugger fault. Determine and print
+ * the active breakpoint.
+ *
+ * Parameters:
+ * regs Exception frame containing machine register state
+ * error Error number passed to kdb.
+ * Outputs:
+ * None.
+ * Returns:
+ * KDB_DB_BPT Standard instruction or data breakpoint encountered
+ * KDB_DB_SS Single Step fault ('ss' command or end of 'ssb' command)
+ * KDB_DB_SSB Single Step fault, caller should continue ('ssb' command)
+ * KDB_DB_SSBPT Single step over breakpoint
+ * KDB_DB_NOBPT No existing kdb breakpoint matches this debug exception
+ * Locking:
+ * None.
+ * Remarks:
+ * Yup, there be goto's here.
+ *
+ * If multiple processors receive debug exceptions simultaneously,
+ * one may be waiting at the kdb fence in kdb() while the user
+ * issues a 'bc' command to clear the breakpoint the processor
+ * which is waiting has already encountered. If this is the case,
+ * the debug registers will no longer match any entry in the
+ * breakpoint table, and we'll return the value KDB_DB_NOBPT.
+ * This can cause a panic in die_if_kernel(). It is safer to
+ * disable the breakpoint (bd), go until all processors are past
+ * the breakpoint then clear the breakpoint (bc). This code
+ * recognises a breakpoint even when disabled but not when it has
+ * been cleared.
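+ *
+ *	An illustrative recovery sequence (breakpoint number assumed
+ *	to be 0):
+ *		bd 0	disable breakpoint 0
+ *		go	resume until all cpus are past the breakpoint
+ *		bc 0	clear the breakpoint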
+ *
+ * WARNING: This routine clears the debug state. It should be called
+ * once per debug and the result cached.
+ */
+
+kdb_dbtrap_t
+kdba_db_trap(struct pt_regs *regs, int error_unused)
+{
+ kdb_machreg_t dr6;
+ kdb_machreg_t dr7;
+ int rw, reg;
+ int i;
+ kdb_dbtrap_t rv = KDB_DB_BPT;
+ kdb_bp_t *bp;
+ int cpu = smp_processor_id();
+
+ if (KDB_NULL_REGS(regs))
+ return KDB_DB_NOBPT;
+
+ dr6 = kdba_getdr6();
+ dr7 = kdba_getdr7();
+
+ if (KDB_DEBUG(BP))
+ kdb_printf("kdb: dr6 0x%lx dr7 0x%lx\n", dr6, dr7);
+ if (dr6 & DR6_BS) {
+ if (KDB_STATE(SSBPT)) {
+ if (KDB_DEBUG(BP))
+ kdb_printf("ssbpt\n");
+ KDB_STATE_CLEAR(SSBPT);
+ for(i=0,bp=kdb_breakpoints;
+ i < KDB_MAXBPT;
+ i++, bp++) {
+ if (KDB_DEBUG(BP))
+ kdb_printf("bp 0x%p enabled %d delayed %d global %d cpu %d\n",
+ bp, bp->bp_enabled, bp->bp_delayed, bp->bp_global, bp->bp_cpu);
+ if (!bp->bp_enabled)
+ continue;
+ if (!bp->bp_global && bp->bp_cpu != smp_processor_id())
+ continue;
+ if (KDB_DEBUG(BP))
+ kdb_printf("bp for this cpu\n");
+ if (bp->bp_delayed) {
+ bp->bp_delayed = 0;
+ if (KDB_DEBUG(BP)){
+ /* Can't be hw breakpoint */
+ if (bp->bp_hardtype)
+ kdb_printf("kdb: Error - hw bp delayed\n");
+ kdb_printf("kdba_installbp\n");
+ }
+ kdba_installbp(regs, bp);
+ if (!KDB_STATE(DOING_SS)) {
+ regs->flags &= ~X86_EFLAGS_TF;
+ return(KDB_DB_SSBPT);
+ }
+ break;
+ }
+ }
+ if (i == KDB_MAXBPT) {
+ kdb_printf("kdb: Unable to find delayed breakpoint\n");
+ }
+ if (!KDB_STATE(DOING_SS)) {
+ regs->flags &= ~X86_EFLAGS_TF;
+ return(KDB_DB_NOBPT);
+ }
+ /* FALLTHROUGH */
+ }
+
+ /*
+ * KDB_STATE_DOING_SS is set when the kernel debugger is using
+ * the processor trap flag to single-step a processor. If a
+ * single step trap occurs and this flag is clear, the SS trap
+ * will be ignored by KDB and the kernel will be allowed to deal
+ * with it as necessary (e.g. for ptrace).
+ */
+ if (!KDB_STATE(DOING_SS))
+ goto unknown;
+
+ /* single step */
+ rv = KDB_DB_SS; /* Indicate single step */
+ if (KDB_STATE(DOING_SSB)) {
+ unsigned char instruction[2];
+
+ kdb_id1(regs->ip);
+ if (kdb_getarea(instruction, regs->ip) ||
+ (instruction[0]&0xf0) == 0xe0 || /* short disp jumps */
+ (instruction[0]&0xf0) == 0x70 || /* Misc. jumps */
+ instruction[0] == 0xc2 || /* ret */
+ instruction[0] == 0x9a || /* call */
+ (instruction[0]&0xf8) == 0xc8 || /* enter, leave, iret, int, */
+ ((instruction[0] == 0x0f) &&
+ ((instruction[1]&0xf0)== 0x80))
+ ) {
+ /*
+ * End the ssb command here.
+ */
+ KDB_STATE_CLEAR(DOING_SSB);
+ KDB_STATE_CLEAR(DOING_SS);
+ } else {
+ rv = KDB_DB_SSB; /* Indicate ssb - dismiss immediately */
+ }
+ } else {
+ /*
+ * Print current insn
+ */
+ kdb_printf("SS trap at ");
+ kdb_symbol_print(regs->ip, NULL, KDB_SP_DEFAULT|KDB_SP_NEWLINE);
+ kdb_id1(regs->ip);
+ KDB_STATE_CLEAR(DOING_SS);
+ }
+
+ if (rv != KDB_DB_SSB)
+ regs->flags &= ~X86_EFLAGS_TF;
+ }
+
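+	/* DR6 bits B0-B3 record which debug register's condition fired.
+	 * The matching DR7 R/W field encodes the access type (instruction,
+	 * data write, I/O or data access) reported below.
+	 */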
+ if (dr6 & DR6_B0) {
+ rw = DR7_RW0(dr7);
+ reg = 0;
+ goto handle;
+ }
+
+ if (dr6 & DR6_B1) {
+ rw = DR7_RW1(dr7);
+ reg = 1;
+ goto handle;
+ }
+
+ if (dr6 & DR6_B2) {
+ rw = DR7_RW2(dr7);
+ reg = 2;
+ goto handle;
+ }
+
+ if (dr6 & DR6_B3) {
+ rw = DR7_RW3(dr7);
+ reg = 3;
+ goto handle;
+ }
+
+ if (rv > 0)
+ goto handled;
+
+ goto unknown; /* dismiss */
+
+handle:
+ /*
+ * Set Resume Flag
+ */
+ regs->flags |= X86_EFLAGS_RF;
+
+ /*
+ * Determine which breakpoint was encountered.
+ */
+ for(i=0, bp=kdb_breakpoints; i<KDB_MAXBPT; i++, bp++) {
+ if (!(bp->bp_free)
+ && (bp->bp_global || bp->bp_cpu == smp_processor_id())
+ && (bp->bp_hard[cpu])
+ && (bp->bp_hard[cpu]->bph_reg == reg)) {
+ /*
+ * Hit this breakpoint.
+ */
+ kdb_printf("%s breakpoint #%d at " kdb_bfd_vma_fmt "\n",
+ kdba_rwtypes[rw],
+ i, bp->bp_addr);
+
+ /*
+ * For an instruction breakpoint, disassemble
+ * the current instruction.
+ */
+ if (rw == 0) {
+ kdb_id1(regs->ip);
+ }
+
+ goto handled;
+ }
+ }
+
+unknown:
+	regs->flags |= X86_EFLAGS_RF;	/* Suppress further faults */
+ rv = KDB_DB_NOBPT; /* Cause kdb() to return */
+
+handled:
+
+ /*
+ * Clear the pending exceptions.
+ */
+ kdba_putdr6(0);
+
+ return rv;
+}
+
+/*
+ * kdba_bp_trap
+ *
+ * Perform breakpoint processing upon entry to the
+ * processor breakpoint instruction fault. Determine and print
+ * the active breakpoint.
+ *
+ * Parameters:
+ * regs Exception frame containing machine register state
+ * error Error number passed to kdb.
+ * Outputs:
+ * None.
+ * Returns:
+ * 0 Standard instruction or data breakpoint encountered
+ * 1 Single Step fault ('ss' command)
+ * 2 Single Step fault, caller should continue ('ssb' command)
+ * 3 No existing kdb breakpoint matches this debug exception
+ * Locking:
+ * None.
+ * Remarks:
+ *
+ * If multiple processors receive debug exceptions simultaneously,
+ * one may be waiting at the kdb fence in kdb() while the user
+ * issues a 'bc' command to clear the breakpoint the processor which
+ * is waiting has already encountered. If this is the case, the
+ * debug registers will no longer match any entry in the breakpoint
+ * table, and we'll return the value '3'. This can cause a panic
+ * in die_if_kernel(). It is safer to disable the breakpoint (bd),
+ * 'go' until all processors are past the breakpoint then clear the
+ * breakpoint (bc). This code recognises a breakpoint even when
+ * disabled but not when it has been cleared.
+ *
+ * WARNING: This routine resets the ip. It should be called
+ * once per breakpoint and the result cached.
+ */
+
+kdb_dbtrap_t
+kdba_bp_trap(struct pt_regs *regs, int error_unused)
+{
+ int i;
+ kdb_dbtrap_t rv;
+ kdb_bp_t *bp;
+
+ if (KDB_NULL_REGS(regs))
+ return KDB_DB_NOBPT;
+
+ /*
+ * Determine which breakpoint was encountered.
+ */
+ if (KDB_DEBUG(BP))
+ kdb_printf("kdba_bp_trap: ip=0x%lx (not adjusted) "
+ "flags=0x%lx regs=0x%p sp=0x%lx\n",
+ regs->ip, regs->flags, regs, regs->sp);
+
+ rv = KDB_DB_NOBPT; /* Cause kdb() to return */
+
+ for(i=0, bp=kdb_breakpoints; i<KDB_MAXBPT; i++, bp++) {
+ if (bp->bp_free)
+ continue;
+ if (!bp->bp_global && bp->bp_cpu != smp_processor_id())
+ continue;
+ if ((void *)bp->bp_addr == (void *)(regs->ip - bp->bp_adjust)) {
+ /* Hit this breakpoint. */
+ regs->ip -= bp->bp_adjust;
+ kdb_printf("Instruction(i) breakpoint #%d at 0x%lx (adjusted)\n",
+ i, regs->ip);
+ kdb_id1(regs->ip);
+ rv = KDB_DB_BPT;
+ bp->bp_delay = 1;
+ /* SSBPT is set when the kernel debugger must single
+ * step a task in order to re-establish an instruction
+ * breakpoint which uses the instruction replacement
+ * mechanism. It is cleared by any action that removes
+ * the need to single-step the breakpoint.
+ */
+ KDB_STATE_SET(SSBPT);
+ break;
+ }
+ }
+
+ return rv;
+}
+
+/*
+ * kdba_handle_bp
+ *
+ * Handle an instruction-breakpoint trap. Called when re-installing
+ *	an enabled breakpoint which has the bp_delay bit set.
+ *
+ * Parameters:
+ * Returns:
+ * Locking:
+ * Remarks:
+ *
+ * Ok, we really need to:
+ * 1) Restore the original instruction byte
+ * 2) Single Step
+ * 3) Restore breakpoint instruction
+ * 4) Continue.
+ *
+ */
+
+static void
+kdba_handle_bp(struct pt_regs *regs, kdb_bp_t *bp)
+{
+ if (KDB_NULL_REGS(regs))
+ return;
+
+ if (KDB_DEBUG(BP))
+ kdb_printf("regs->ip = 0x%lx\n", regs->ip);
+
+ /*
+ * Setup single step
+ */
+ kdba_setsinglestep(regs);
+
+ /*
+ * Reset delay attribute
+ */
+ bp->bp_delay = 0;
+ bp->bp_delayed = 1;
+}
+
+
+/*
+ * kdba_bptype
+ *
+ * Return a string describing type of breakpoint.
+ *
+ * Parameters:
+ * bph Pointer to hardware breakpoint description
+ * Outputs:
+ * None.
+ * Returns:
+ * Character string.
+ * Locking:
+ * None.
+ * Remarks:
+ */
+
+char *
+kdba_bptype(kdbhard_bp_t *bph)
+{
+ char *mode;
+
+ mode = kdba_rwtypes[bph->bph_mode];
+
+ return mode;
+}
+
+/*
+ * kdba_printbpreg
+ *
+ * Print register name assigned to breakpoint
+ *
+ * Parameters:
+ * bph Pointer hardware breakpoint structure
+ * Outputs:
+ * None.
+ * Returns:
+ * None.
+ * Locking:
+ * None.
+ * Remarks:
+ */
+
+static void
+kdba_printbpreg(kdbhard_bp_t *bph)
+{
+ kdb_printf(" in dr%ld", bph->bph_reg);
+}
+
+/*
+ * kdba_printbp
+ *
+ * Print string describing hardware breakpoint.
+ *
+ * Parameters:
+ * bph Pointer to hardware breakpoint description
+ * Outputs:
+ * None.
+ * Returns:
+ * None.
+ * Locking:
+ * None.
+ * Remarks:
+ */
+
+void
+kdba_printbp(kdb_bp_t *bp)
+{
+ int cpu;
+
+ kdb_printf("\n is enabled");
+ if (bp->bp_hardtype) {
+ if (bp->bp_global)
+ cpu = smp_processor_id();
+ else
+ cpu = bp->bp_cpu;
+ kdba_printbpreg(bp->bp_hard[cpu]);
+ if (bp->bp_hard[cpu]->bph_mode != 0) {
+ kdb_printf(" for %d bytes",
+ bp->bp_hard[cpu]->bph_length+1);
+ }
+ }
+}
+
+/*
+ * kdba_parsebp
+ *
+ * Parse architecture dependent portion of the
+ * breakpoint command.
+ *
+ * Parameters:
+ * None.
+ * Outputs:
+ * None.
+ * Returns:
+ * Zero for success, a kdb diagnostic for failure
+ * Locking:
+ * None.
+ * Remarks:
+ *	For the IA32 architecture, data access, data write and
+ * I/O breakpoints are supported in addition to instruction
+ * breakpoints.
+ *
+ * {datar|dataw|io|inst} [length]
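+ *
+ * Illustrative uses of this grammar (addresses made up):
+ *	bpha <address> dataw 4		break on a 4-byte data write
+ *	bpha <address> io 1		break on a 1-byte I/O access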
+ */
+
+int
+kdba_parsebp(int argc, const char **argv, int *nextargp, kdb_bp_t *bp)
+{
+ int nextarg = *nextargp;
+ int diag;
+ kdbhard_bp_t *bph = &bp->bp_template;
+
+ bph->bph_mode = 0; /* Default to instruction breakpoint */
+ bph->bph_length = 0; /* Length must be zero for insn bp */
+ if ((argc + 1) != nextarg) {
+ if (strnicmp(argv[nextarg], "datar", sizeof("datar")) == 0) {
+ bph->bph_mode = 3;
+ } else if (strnicmp(argv[nextarg], "dataw", sizeof("dataw")) == 0) {
+ bph->bph_mode = 1;
+ } else if (strnicmp(argv[nextarg], "io", sizeof("io")) == 0) {
+ bph->bph_mode = 2;
+ } else if (strnicmp(argv[nextarg], "inst", sizeof("inst")) == 0) {
+ bph->bph_mode = 0;
+ } else {
+ return KDB_ARGCOUNT;
+ }
+
+		bph->bph_length = 3;	/* Default to 4 bytes */
+
+ nextarg++;
+
+ if ((argc + 1) != nextarg) {
+ unsigned long len;
+
+ diag = kdbgetularg((char *)argv[nextarg],
+ &len);
+ if (diag)
+ return diag;
+
+
+ if ((len > 4) || (len == 3))
+ return KDB_BADLENGTH;
+
+ bph->bph_length = len;
+ bph->bph_length--; /* Normalize for debug register */
+ nextarg++;
+ }
+
+ if ((argc + 1) != nextarg)
+ return KDB_ARGCOUNT;
+
+ /*
+ * Indicate to architecture independent level that
+ * a hardware register assignment is required to enable
+ * this breakpoint.
+ */
+
+ bph->bph_free = 0;
+ } else {
+ if (KDB_DEBUG(BP))
+ kdb_printf("kdba_bp: no args, forcehw is %d\n", bp->bp_forcehw);
+ if (bp->bp_forcehw) {
+ /*
+ * We are forced to use a hardware register for this
+ * breakpoint because either the bph or bpha
+ * commands were used to establish this breakpoint.
+ */
+ bph->bph_free = 0;
+ } else {
+ /*
+ * Indicate to architecture dependent level that
+ * the instruction replacement breakpoint technique
+ * should be used for this breakpoint.
+ */
+ bph->bph_free = 1;
+ bp->bp_adjust = 1; /* software, int 3 is one byte */
+ }
+ }
+
+ if (bph->bph_mode != 2 && kdba_verify_rw(bp->bp_addr, bph->bph_length+1)) {
+ kdb_printf("Invalid address for breakpoint, ignoring bp command\n");
+ return KDB_BADADDR;
+ }
+
+ *nextargp = nextarg;
+ return 0;
+}
+
+/*
+ * kdba_allocbp
+ *
+ *	Allocate a hw register for a bp on a specific CPU.
+ *
+ * Parameters:
+ * None.
+ * Outputs:
+ * None.
+ * Returns:
+ * A pointer to the allocated register kdbhard_bp_t structure for
+ * success, Null and a non-zero diagnostic for failure.
+ * Locking:
+ * None.
+ * Remarks:
+ */
+
+static kdbhard_bp_t *
+kdba_allocbp(kdbhard_bp_t *bph, int *diagp, unsigned int cpu)
+{
+ int i;
+ kdbhard_bp_t *newbph;
+
+ for(i=0; i < KDB_MAXHARDBPT; i++) {
+ newbph=&(kdb_hardbreaks[cpu][i]);
+ if (newbph->bph_free) {
+ break;
+ }
+ }
+
+ if (i == KDB_MAXHARDBPT) {
+ *diagp = KDB_TOOMANYDBREGS;
+ return NULL;
+ }
+
+ *diagp = 0;
+
+ /*
+ * Copy data from template. Can't just copy the entire template
+ * here because the register number in kdb_hardbreaks must be
+ * preserved.
+ */
+ newbph->bph_data = bph->bph_data;
+ newbph->bph_write = bph->bph_write;
+ newbph->bph_mode = bph->bph_mode;
+ newbph->bph_length = bph->bph_length;
+
+ /*
+ * Mark entry allocated.
+ */
+ newbph->bph_free = 0;
+
+ return newbph;
+}
+
+/*
+ * kdba_alloc_hwbp
+ *
+ *	Associate hardware registers with a breakpoint.
+ *	If the hw bp is global, a hw register descriptor is allocated
+ *	on every CPU.
+ *
+ * Parameters:
+ * bp - hardware bp
+ *	diagp - pointer to a variable that receives the error code
+ *		when the function completes
+ * Outputs:
+ * None.
+ * Returns:
+ * None
+ * Locking:
+ * None.
+ * Remarks:
+ * Should be called with correct bp->bp_template
+ */
+
+void
+kdba_alloc_hwbp(kdb_bp_t *bp, int *diagp)
+{
+ int i;
+
+ if (bp->bp_global){
+ for (i = 0; i < NR_CPUS; ++i) {
+ if (!cpu_online(i))
+ continue;
+ bp->bp_hard[i] = kdba_allocbp(&bp->bp_template, diagp, i);
+ if (*diagp)
+ break;
+ }
+ } else {
+ bp->bp_hard[bp->bp_cpu] = kdba_allocbp(&bp->bp_template, diagp, bp->bp_cpu);
+ }
+ bp->bp_hardtype = 1;
+}
+
+/*
+ * kdba_freebp
+ *	Deallocate the hw register descriptor for a bp on a specific CPU.
+ * Deallocate hw registers descriptor for bp on specific CPU
+ *
+ * Parameters:
+ * None.
+ * Outputs:
+ * None.
+ * Returns:
+ * Zero for success, a kdb diagnostic for failure
+ * Locking:
+ * None.
+ * Remarks:
+ */
+
+static void
+kdba_freebp(kdbhard_bp_t *bph)
+{
+ bph->bph_free = 1;
+}
+
+/*
+ * kdba_free_hwbp
+ *
+ *	Frees the allocated hw register descriptors for a bp.
+ *	If the hw bp is global, the descriptors are freed
+ *	on every CPU.
+ *
+ * Parameters:
+ * bp - hardware bp
+ * Outputs:
+ * None.
+ * Returns:
+ * None
+ * Locking:
+ * None.
+ * Remarks:
+ * Should be called with correct bp->bp_template
+ */
+
+void
+kdba_free_hwbp(kdb_bp_t *bp)
+{
+ int i;
+
+	/* When the kernel enters KDB, all local bps are
+	 * removed first, so we don't need to clear the
+	 * debug registers here.
+	 */
+
+ if (bp->bp_global){
+ for (i = 0; i < NR_CPUS; ++i) {
+ if (!cpu_online(i))
+ continue;
+ if (bp->bp_hard[i])
+ kdba_freebp(bp->bp_hard[i]);
+ bp->bp_hard[i] = 0;
+ }
+ } else {
+ kdba_freebp(bp->bp_hard[bp->bp_cpu]);
+ bp->bp_hard[bp->bp_cpu] = NULL;
+ }
+ bp->bp_hardtype = 0;
+}
+
+/*
+ * kdba_initbp
+ *
+ * Initialize the breakpoint table for the hardware breakpoint
+ * register.
+ *
+ * Parameters:
+ * None.
+ * Outputs:
+ * None.
+ * Returns:
+ * Zero for success, a kdb diagnostic for failure
+ * Locking:
+ * None.
+ * Remarks:
+ *
+ * There is one entry per register. On the ia32 architecture
+ * all the registers are interchangeable, so no special allocation
+ * criteria are required.
+ */
+
+void
+kdba_initbp(void)
+{
+ int i,j;
+ kdbhard_bp_t *bph;
+
+ /*
+ * Clear the hardware breakpoint table
+ */
+
+ memset(kdb_hardbreaks, '\0', sizeof(kdb_hardbreaks));
+
+ for (i = 0; i < NR_CPUS; ++i) {
+		/* Called early, before the actual number
+		 * of CPUs is known
+		 */
+ for(j=0; j < KDB_MAXHARDBPT; j++) {
+ bph=&(kdb_hardbreaks[i][j]);
+ bph->bph_reg = j;
+ bph->bph_free = 1;
+ }
+ }
+}
+
+/*
+ * kdba_installbp
+ *
+ * Install a breakpoint
+ *
+ * Parameters:
+ * regs Exception frame
+ * bp Breakpoint structure for the breakpoint to be installed
+ * Outputs:
+ * None.
+ * Returns:
+ * 0 if breakpoint installed.
+ * Locking:
+ * None.
+ * Remarks:
+ * For hardware breakpoints, a debug register is allocated
+ * and assigned to the breakpoint. If no debug register is
+ * available, a warning message is printed and the breakpoint
+ * is disabled.
+ *
+ * For instruction replacement breakpoints, we must single-step
+ * over the replaced instruction at this point so we can re-install
+ * the breakpoint instruction after the single-step. SSBPT is set
+ * when the breakpoint is initially hit and is cleared by any action
+ * that removes the need for single-step over the breakpoint.
+ */
+
+int
+kdba_installbp(struct pt_regs *regs, kdb_bp_t *bp)
+{
+ int cpu = smp_processor_id();
+
+ /*
+ * Install the breakpoint, if it is not already installed.
+ */
+
+ if (KDB_DEBUG(BP)) {
+ kdb_printf("kdba_installbp bp_installed %d\n", bp->bp_installed);
+ }
+ if (!KDB_STATE(SSBPT))
+ bp->bp_delay = 0;
+
+ if (bp->bp_hardtype) {
+ if (KDB_DEBUG(BP) && !bp->bp_global && cpu != bp->bp_cpu){
+ kdb_printf("kdba_installbp: cpu != bp->bp_cpu for local hw bp\n");
+ }
+
+ if (KDB_DEBUG(BP) && !bp->bp_hard[cpu]){
+			kdb_printf("kdba_installbp: Error - bp_hard[smp_processor_id()] is empty\n");
+ return 1;
+ }
+
+ if (!bp->bp_hard[cpu]->bph_installed){
+ kdba_installdbreg(bp);
+ bp->bp_hard[cpu]->bph_installed = 1;
+ if (KDB_DEBUG(BP)) {
+ kdb_printf("kdba_installbp hardware reg %ld at " kdb_bfd_vma_fmt "\n",
+ bp->bp_hard[cpu]->bph_reg, bp->bp_addr);
+ }
+ }
+ } else if (!bp->bp_installed) {
+ if (bp->bp_delay) {
+ if (KDB_DEBUG(BP))
+ kdb_printf("kdba_installbp delayed bp\n");
+ kdba_handle_bp(regs, bp);
+ } else {
+ if (kdb_getarea_size(&(bp->bp_inst), bp->bp_addr, 1) ||
+ kdb_putword(bp->bp_addr, IA32_BREAKPOINT_INSTRUCTION, 1)) {
+ kdb_printf("kdba_installbp failed to set software breakpoint at " kdb_bfd_vma_fmt "\n", bp->bp_addr);
+ return(1);
+ }
+ bp->bp_installed = 1;
+ if (KDB_DEBUG(BP))
+ kdb_printf("kdba_installbp instruction 0x%x at " kdb_bfd_vma_fmt "\n",
+ IA32_BREAKPOINT_INSTRUCTION, bp->bp_addr);
+ }
+ }
+ return(0);
+}
+
+/*
+ * kdba_removebp
+ *
+ * Make a breakpoint ineffective.
+ *
+ * Parameters:
+ * None.
+ * Outputs:
+ * None.
+ * Returns:
+ * None.
+ * Locking:
+ * None.
+ * Remarks:
+ */
+
+int
+kdba_removebp(kdb_bp_t *bp)
+{
+ int cpu = smp_processor_id();
+
+ /*
+ * For hardware breakpoints, remove it from the active register,
+ * for software breakpoints, restore the instruction stream.
+ */
+ if (KDB_DEBUG(BP)) {
+ kdb_printf("kdba_removebp bp_installed %d\n", bp->bp_installed);
+ }
+
+ if (bp->bp_hardtype) {
+ if (KDB_DEBUG(BP) && !bp->bp_global && cpu != bp->bp_cpu){
+ kdb_printf("kdba_removebp: cpu != bp->bp_cpu for local hw bp\n");
+ }
+
+ if (KDB_DEBUG(BP) && !bp->bp_hard[cpu]){
+			kdb_printf("kdba_removebp: Error - bp_hard[smp_processor_id()] is empty\n");
+ return 1;
+ }
+
+ if (KDB_DEBUG(BP)) {
+ kdb_printf("kdb: removing hardware reg %ld at " kdb_bfd_vma_fmt "\n",
+ bp->bp_hard[cpu]->bph_reg, bp->bp_addr);
+ }
+
+ if (bp->bp_hard[cpu]->bph_installed){
+ if (KDB_DEBUG(BP)) {
+				kdb_printf("kdba_removebp hardware reg %ld at " kdb_bfd_vma_fmt "\n",
+ bp->bp_hard[cpu]->bph_reg, bp->bp_addr);
+ }
+ kdba_removedbreg(bp);
+ bp->bp_hard[cpu]->bph_installed = 0;
+ }
+ } else if (bp->bp_installed) {
+ if (KDB_DEBUG(BP))
+ kdb_printf("kdb: restoring instruction 0x%x at " kdb_bfd_vma_fmt "\n",
+ bp->bp_inst, bp->bp_addr);
+ if (kdb_putword(bp->bp_addr, bp->bp_inst, 1))
+ return(1);
+ bp->bp_installed = 0;
+ }
+ return(0);
+}
--- /dev/null
+++ b/arch/x86/kdb/kdba_bt.c
@@ -0,0 +1,5758 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2006, 2007-2009 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * Common code for doing accurate backtraces on i386 and x86_64, including
+ * printing the values of arguments.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/kallsyms.h>
+#include <linux/kdb.h>
+#include <linux/kdbprivate.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/nmi.h>
+#include <asm/asm-offsets.h>
+#include <asm/system.h>
+
+#define KDB_DEBUG_BB(fmt, ...) \
+ {if (KDB_DEBUG(BB)) kdb_printf(fmt, ## __VA_ARGS__);}
+#define KDB_DEBUG_BB_OFFSET_PRINTF(offset, prefix, suffix) \
+ kdb_printf(prefix "%c0x%x" suffix, \
+ offset >= 0 ? '+' : '-', \
+ offset >= 0 ? offset : -offset)
+#define KDB_DEBUG_BB_OFFSET(offset, prefix, suffix) \
+ {if (KDB_DEBUG(BB)) KDB_DEBUG_BB_OFFSET_PRINTF(offset, prefix, suffix);}
+
+#define BB_CHECK(expr, val, ret) \
+({ \
+ if (unlikely(expr)) { \
+ kdb_printf("%s, line %d: BB_CHECK(" #expr ") failed " \
+ #val "=%lx\n", \
+ __FUNCTION__, __LINE__, (long)val); \
+ bb_giveup = 1; \
+ return ret; \
+ } \
+})
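+
+/* BB_CHECK(expr, val, ret) is a sanity guard: if expr is true it reports
+ * the failure with val, sets bb_giveup to abandon the backtrace analysis
+ * and returns ret from the enclosing function.
+ */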
+
+static int bb_giveup;
+
+/* Use BBRG_Rxx for both i386 and x86_64. RAX through R15 must be at the end,
+ * starting with RAX. Some of these codes do not reflect actual registers,
+ * such codes are special cases when parsing the record of register changes.
+ * When updating BBRG_ entries, update bbrg_name as well.
+ */
+
+enum bb_reg_code
+{
+ BBRG_UNDEFINED = 0, /* Register contents are undefined */
+ BBRG_OSP, /* original stack pointer on entry to function */
+ BBRG_RAX,
+ BBRG_RBX,
+ BBRG_RCX,
+ BBRG_RDX,
+ BBRG_RDI,
+ BBRG_RSI,
+ BBRG_RBP,
+ BBRG_RSP,
+ BBRG_R8,
+ BBRG_R9,
+ BBRG_R10,
+ BBRG_R11,
+ BBRG_R12,
+ BBRG_R13,
+ BBRG_R14,
+ BBRG_R15,
+};
+
+const static char *bbrg_name[] = {
+ [BBRG_UNDEFINED] = "undefined",
+ [BBRG_OSP] = "osp",
+ [BBRG_RAX] = "rax",
+ [BBRG_RBX] = "rbx",
+ [BBRG_RCX] = "rcx",
+ [BBRG_RDX] = "rdx",
+ [BBRG_RDI] = "rdi",
+ [BBRG_RSI] = "rsi",
+ [BBRG_RBP] = "rbp",
+ [BBRG_RSP] = "rsp",
+ [BBRG_R8] = "r8",
+ [BBRG_R9] = "r9",
+ [BBRG_R10] = "r10",
+ [BBRG_R11] = "r11",
+ [BBRG_R12] = "r12",
+ [BBRG_R13] = "r13",
+ [BBRG_R14] = "r14",
+ [BBRG_R15] = "r15",
+};
+
+/* Map a register name to its register code. This includes the sub-register
+ * addressable fields, e.g. parts of rax can be addressed as ax, al, ah, eax.
+ * The list is sorted so it can be binary chopped, sort command is:
+ * LANG=C sort -t '"' -k2
+ */
+
+struct bb_reg_code_map {
+ enum bb_reg_code reg;
+ const char *name;
+};
+
+const static struct bb_reg_code_map
+bb_reg_code_map[] = {
+ { BBRG_RAX, "ah" },
+ { BBRG_RAX, "al" },
+ { BBRG_RAX, "ax" },
+ { BBRG_RBX, "bh" },
+ { BBRG_RBX, "bl" },
+ { BBRG_RBP, "bp" },
+ { BBRG_RBP, "bpl" },
+ { BBRG_RBX, "bx" },
+ { BBRG_RCX, "ch" },
+ { BBRG_RCX, "cl" },
+ { BBRG_RCX, "cx" },
+ { BBRG_RDX, "dh" },
+ { BBRG_RDI, "di" },
+ { BBRG_RDI, "dil" },
+ { BBRG_RDX, "dl" },
+ { BBRG_RDX, "dx" },
+ { BBRG_RAX, "eax" },
+ { BBRG_RBP, "ebp" },
+ { BBRG_RBX, "ebx" },
+ { BBRG_RCX, "ecx" },
+ { BBRG_RDI, "edi" },
+ { BBRG_RDX, "edx" },
+ { BBRG_RSI, "esi" },
+ { BBRG_RSP, "esp" },
+ { BBRG_R10, "r10" },
+ { BBRG_R10, "r10d" },
+ { BBRG_R10, "r10l" },
+ { BBRG_R10, "r10w" },
+ { BBRG_R11, "r11" },
+ { BBRG_R11, "r11d" },
+ { BBRG_R11, "r11l" },
+ { BBRG_R11, "r11w" },
+ { BBRG_R12, "r12" },
+ { BBRG_R12, "r12d" },
+ { BBRG_R12, "r12l" },
+ { BBRG_R12, "r12w" },
+ { BBRG_R13, "r13" },
+ { BBRG_R13, "r13d" },
+ { BBRG_R13, "r13l" },
+ { BBRG_R13, "r13w" },
+ { BBRG_R14, "r14" },
+ { BBRG_R14, "r14d" },
+ { BBRG_R14, "r14l" },
+ { BBRG_R14, "r14w" },
+ { BBRG_R15, "r15" },
+ { BBRG_R15, "r15d" },
+ { BBRG_R15, "r15l" },
+ { BBRG_R15, "r15w" },
+ { BBRG_R8, "r8" },
+ { BBRG_R8, "r8d" },
+ { BBRG_R8, "r8l" },
+ { BBRG_R8, "r8w" },
+ { BBRG_R9, "r9" },
+ { BBRG_R9, "r9d" },
+ { BBRG_R9, "r9l" },
+ { BBRG_R9, "r9w" },
+ { BBRG_RAX, "rax" },
+ { BBRG_RBP, "rbp" },
+ { BBRG_RBX, "rbx" },
+ { BBRG_RCX, "rcx" },
+ { BBRG_RDI, "rdi" },
+ { BBRG_RDX, "rdx" },
+ { BBRG_RSI, "rsi" },
+ { BBRG_RSP, "rsp" },
+ { BBRG_RSI, "si" },
+ { BBRG_RSI, "sil" },
+ { BBRG_RSP, "sp" },
+ { BBRG_RSP, "spl" },
+};
+
+/* Record register contents in terms of the values that were passed to this
+ * function, IOW track which registers contain an input value. A register's
+ * contents can be undefined, it can contain an input register value or it can
+ * contain an offset from the original stack pointer.
+ *
+ * This structure is used to represent the current contents of the integer
+ * registers, it is held in an array that is indexed by BBRG_xxx. The element
+ * for BBRG_xxx indicates what input value is currently in BBRG_xxx. When
+ * 'value' is BBRG_OSP then register BBRG_xxx contains a stack pointer,
+ * pointing at 'offset' from the original stack pointer on entry to the
+ * function. When 'value' is not BBRG_OSP then element BBRG_xxx contains the
+ * original contents of an input register and offset is ignored.
+ *
+ * An input register 'value' can be stored in more than one register and/or in
+ * more than one memory location.
+ */
+
+struct bb_reg_contains
+{
+ enum bb_reg_code value: 8;
+ short offset;
+};
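+
+/* Example: after a standard prologue "push %rbp; mov %rsp,%rbp", RBP is
+ * recorded as { .value = BBRG_OSP, .offset = -KDB_WORD_SIZE }, i.e. RBP
+ * now holds a stack pointer one word below the original stack pointer.
+ * (Illustrative values, not taken from a real trace.)
+ */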
+
+/* Note: the offsets in struct bb_mem_contains in this code are _NOT_ offsets
+ * from OSP, they are offsets from current RSP. It fits better with the way
+ * that struct pt_regs is built, some code pushes extra data before pt_regs so
+ * working with OSP relative offsets gets messy. struct bb_mem_contains
+ * entries must be in descending order of RSP offset.
+ */
+
+typedef struct { DECLARE_BITMAP(bits, BBRG_R15+1); } bbrgmask_t;
+#define BB_SKIP(reg) (1 << (BBRG_ ## reg))
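+/* BB_SKIP(RAX) etc. build single-bit masks indexed by register code; the
+ * masks are OR'ed together to mark mem/reg slots whose value may safely
+ * be undefined at a checked label.
+ */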
+struct bb_mem_contains {
+ short offset_address;
+ enum bb_reg_code value: 8;
+};
+
+/* Transfer of control to a label outside the current function. If the
+ * transfer is to a known common restore path that expects known registers
+ * and/or a known memory state (e.g. struct pt_regs) then do a sanity check on
+ * the state at this point.
+ */
+
+struct bb_name_state {
+ const char *name; /* target function */
+ bfd_vma address; /* Address of target function */
+ const char *fname; /* optional from function name */
+ const struct bb_mem_contains *mem; /* expected memory state */
+ const struct bb_reg_contains *regs; /* expected register state */
+ const unsigned short mem_size; /* ARRAY_SIZE(mem) */
+ const unsigned short regs_size; /* ARRAY_SIZE(regs) */
+ const short osp_offset; /* RSP in regs == OSP+osp_offset */
+ const bbrgmask_t skip_mem; /* Some slots in mem may be undefined */
+ const bbrgmask_t skip_regs; /* Some slots in regs may be undefined */
+};
+
+/* NS (NAME_STATE) macros define the register and memory state when we transfer
+ * control to or start decoding a special case name. Use NS when the target
+ * label always has the same state. Use NS_FROM and specify the source label
+ * if the target state is slightly different depending on where it is branched
+ * from. This gives better state checking, by isolating the special cases.
+ *
+ * Note: for the same target label, NS_FROM entries must be followed by a
+ * single NS entry.
+ */
+
+#define NS_FROM(iname, ifname, imem, iregs, iskip_mem, iskip_regs, iosp_offset) \
+ { \
+ .name = iname, \
+ .fname = ifname, \
+ .mem = imem, \
+ .regs = iregs, \
+ .mem_size = ARRAY_SIZE(imem), \
+ .regs_size = ARRAY_SIZE(iregs), \
+ .skip_mem.bits[0] = iskip_mem, \
+ .skip_regs.bits[0] = iskip_regs, \
+ .osp_offset = iosp_offset, \
+ .address = 0 \
+ }
+
+/* Shorter forms for the common cases */
+#define NS(iname, imem, iregs, iskip_mem, iskip_regs, iosp_offset) \
+ NS_FROM(iname, NULL, imem, iregs, iskip_mem, iskip_regs, iosp_offset)
+#define NS_MEM(iname, imem, iskip_mem) \
+ NS_FROM(iname, NULL, imem, no_regs, iskip_mem, 0, 0)
+#define NS_MEM_FROM(iname, ifname, imem, iskip_mem) \
+ NS_FROM(iname, ifname, imem, no_regs, iskip_mem, 0, 0)
+#define NS_REG(iname, iregs, iskip_regs) \
+ NS_FROM(iname, NULL, no_memory, iregs, 0, iskip_regs, 0)
+#define NS_REG_FROM(iname, ifname, iregs, iskip_regs) \
+ NS_FROM(iname, ifname, no_memory, iregs, 0, iskip_regs, 0)
+
+static void
+bb_reg_code_set_value(enum bb_reg_code dst, enum bb_reg_code src);
+
+static const char *bb_mod_name, *bb_func_name;
+
+static int
+bb_noret(const char *name)
+{
+ if (strcmp(name, "panic") == 0 ||
+ strcmp(name, "do_exit") == 0 ||
+ strcmp(name, "do_group_exit") == 0 ||
+ strcmp(name, "complete_and_exit") == 0)
+ return 1;
+ return 0;
+}
+
+/*============================================================================*/
+/* */
+/* Most of the basic block code and data is common to x86_64 and i386. This */
+/* large ifdef contains almost all of the differences between the two */
+/* architectures. */
+/* */
+/* Make sure you update the correct section of this ifdef. */
+/* */
+/*============================================================================*/
+
+#ifdef CONFIG_X86_64
+
+/* Registers that can be used to pass parameters, in the order that parameters
+ * are passed.
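+ * On x86_64 this list is the System V AMD64 integer argument order.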
+ */
+
+const static enum bb_reg_code
+bb_param_reg[] = {
+ BBRG_RDI,
+ BBRG_RSI,
+ BBRG_RDX,
+ BBRG_RCX,
+ BBRG_R8,
+ BBRG_R9,
+};
+
+const static enum bb_reg_code
+bb_preserved_reg[] = {
+ BBRG_RBX,
+ BBRG_RBP,
+ BBRG_RSP,
+ BBRG_R12,
+ BBRG_R13,
+ BBRG_R14,
+ BBRG_R15,
+};
+
+static const struct bb_mem_contains full_pt_regs[] = {
+ { 0x70, BBRG_RDI },
+ { 0x68, BBRG_RSI },
+ { 0x60, BBRG_RDX },
+ { 0x58, BBRG_RCX },
+ { 0x50, BBRG_RAX },
+ { 0x48, BBRG_R8 },
+ { 0x40, BBRG_R9 },
+ { 0x38, BBRG_R10 },
+ { 0x30, BBRG_R11 },
+ { 0x28, BBRG_RBX },
+ { 0x20, BBRG_RBP },
+ { 0x18, BBRG_R12 },
+ { 0x10, BBRG_R13 },
+ { 0x08, BBRG_R14 },
+ { 0x00, BBRG_R15 },
+};
+static const struct bb_mem_contains full_pt_regs_plus_1[] = {
+ { 0x78, BBRG_RDI },
+ { 0x70, BBRG_RSI },
+ { 0x68, BBRG_RDX },
+ { 0x60, BBRG_RCX },
+ { 0x58, BBRG_RAX },
+ { 0x50, BBRG_R8 },
+ { 0x48, BBRG_R9 },
+ { 0x40, BBRG_R10 },
+ { 0x38, BBRG_R11 },
+ { 0x30, BBRG_RBX },
+ { 0x28, BBRG_RBP },
+ { 0x20, BBRG_R12 },
+ { 0x18, BBRG_R13 },
+ { 0x10, BBRG_R14 },
+ { 0x08, BBRG_R15 },
+};
+/*
+ * Going into error_exit we have the hardware pushed error_code on the stack
+ * plus a full pt_regs
+ */
+static const struct bb_mem_contains error_code_full_pt_regs[] = {
+ { 0x78, BBRG_UNDEFINED },
+ { 0x70, BBRG_RDI },
+ { 0x68, BBRG_RSI },
+ { 0x60, BBRG_RDX },
+ { 0x58, BBRG_RCX },
+ { 0x50, BBRG_RAX },
+ { 0x48, BBRG_R8 },
+ { 0x40, BBRG_R9 },
+ { 0x38, BBRG_R10 },
+ { 0x30, BBRG_R11 },
+ { 0x28, BBRG_RBX },
+ { 0x20, BBRG_RBP },
+ { 0x18, BBRG_R12 },
+ { 0x10, BBRG_R13 },
+ { 0x08, BBRG_R14 },
+ { 0x00, BBRG_R15 },
+};
+static const struct bb_mem_contains partial_pt_regs[] = {
+ { 0x40, BBRG_RDI },
+ { 0x38, BBRG_RSI },
+ { 0x30, BBRG_RDX },
+ { 0x28, BBRG_RCX },
+ { 0x20, BBRG_RAX },
+ { 0x18, BBRG_R8 },
+ { 0x10, BBRG_R9 },
+ { 0x08, BBRG_R10 },
+ { 0x00, BBRG_R11 },
+};
+static const struct bb_mem_contains partial_pt_regs_plus_1[] = {
+ { 0x48, BBRG_RDI },
+ { 0x40, BBRG_RSI },
+ { 0x38, BBRG_RDX },
+ { 0x30, BBRG_RCX },
+ { 0x28, BBRG_RAX },
+ { 0x20, BBRG_R8 },
+ { 0x18, BBRG_R9 },
+ { 0x10, BBRG_R10 },
+ { 0x08, BBRG_R11 },
+};
+static const struct bb_mem_contains partial_pt_regs_plus_2[] = {
+ { 0x50, BBRG_RDI },
+ { 0x48, BBRG_RSI },
+ { 0x40, BBRG_RDX },
+ { 0x38, BBRG_RCX },
+ { 0x30, BBRG_RAX },
+ { 0x28, BBRG_R8 },
+ { 0x20, BBRG_R9 },
+ { 0x18, BBRG_R10 },
+ { 0x10, BBRG_R11 },
+};
+static const struct bb_mem_contains no_memory[] = {
+};
+/* Hardware has already pushed an error_code on the stack. Use undefined just
+ * to set the initial stack offset.
+ */
+static const struct bb_mem_contains error_code[] = {
+ { 0x0, BBRG_UNDEFINED },
+};
+/* error_code plus original rax */
+static const struct bb_mem_contains error_code_rax[] = {
+ { 0x8, BBRG_UNDEFINED },
+ { 0x0, BBRG_RAX },
+};
+
+static const struct bb_reg_contains all_regs[] = {
+ [BBRG_RAX] = { BBRG_RAX, 0 },
+ [BBRG_RBX] = { BBRG_RBX, 0 },
+ [BBRG_RCX] = { BBRG_RCX, 0 },
+ [BBRG_RDX] = { BBRG_RDX, 0 },
+ [BBRG_RDI] = { BBRG_RDI, 0 },
+ [BBRG_RSI] = { BBRG_RSI, 0 },
+ [BBRG_RBP] = { BBRG_RBP, 0 },
+ [BBRG_RSP] = { BBRG_OSP, 0 },
+ [BBRG_R8 ] = { BBRG_R8, 0 },
+ [BBRG_R9 ] = { BBRG_R9, 0 },
+ [BBRG_R10] = { BBRG_R10, 0 },
+ [BBRG_R11] = { BBRG_R11, 0 },
+ [BBRG_R12] = { BBRG_R12, 0 },
+ [BBRG_R13] = { BBRG_R13, 0 },
+ [BBRG_R14] = { BBRG_R14, 0 },
+ [BBRG_R15] = { BBRG_R15, 0 },
+};
+static const struct bb_reg_contains no_regs[] = {
+};
+
+static struct bb_name_state bb_special_cases[] = {
+
+ /* First the cases that pass data only in memory. We do not check any
+ * register state for these cases.
+ */
+
+ /* Simple cases, no exceptions */
+ NS_MEM("ia32_ptregs_common", partial_pt_regs_plus_1, 0),
+ NS_MEM("ia32_sysret", partial_pt_regs, 0),
+ NS_MEM("int_careful", partial_pt_regs, 0),
+ NS_MEM("ia32_badarg", partial_pt_regs, 0),
+ NS_MEM("int_restore_rest", full_pt_regs, 0),
+ NS_MEM("int_signal", full_pt_regs, 0),
+ NS_MEM("int_very_careful", partial_pt_regs, 0),
+ NS_MEM("ptregscall_common", full_pt_regs_plus_1, 0),
+ NS_MEM("ret_from_intr", partial_pt_regs_plus_2, 0),
+ NS_MEM("stub32_clone", partial_pt_regs_plus_1, 0),
+ NS_MEM("stub32_execve", partial_pt_regs_plus_1, 0),
+ NS_MEM("stub32_fork", partial_pt_regs_plus_1, 0),
+ NS_MEM("stub32_iopl", partial_pt_regs_plus_1, 0),
+ NS_MEM("stub32_rt_sigreturn", partial_pt_regs_plus_1, 0),
+ NS_MEM("stub32_sigaltstack", partial_pt_regs_plus_1, 0),
+ NS_MEM("stub32_sigreturn", partial_pt_regs_plus_1, 0),
+ NS_MEM("stub32_vfork", partial_pt_regs_plus_1, 0),
+ NS_MEM("stub_clone", partial_pt_regs_plus_1, 0),
+ NS_MEM("stub_execve", partial_pt_regs_plus_1, 0),
+ NS_MEM("stub_fork", partial_pt_regs_plus_1, 0),
+ NS_MEM("stub_iopl", partial_pt_regs_plus_1, 0),
+ NS_MEM("stub_rt_sigreturn", partial_pt_regs_plus_1, 0),
+ NS_MEM("stub_sigaltstack", partial_pt_regs_plus_1, 0),
+ NS_MEM("stub_vfork", partial_pt_regs_plus_1, 0),
+ NS_MEM("sysenter_auditsys", partial_pt_regs,
+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11)),
+
+ NS_MEM("paranoid_exit", error_code_full_pt_regs, 0),
+
+ NS_MEM_FROM("ia32_badsys", "ia32_sysenter_target",
+ partial_pt_regs,
+ /* ia32_sysenter_target uses CLEAR_RREGS to clear R8-R11 on
+ * some paths. It also stomps on RAX.
+ */
+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
+ BB_SKIP(RAX)),
+ NS_MEM_FROM("ia32_badsys", "ia32_cstar_target",
+ partial_pt_regs,
+ /* ia32_cstar_target uses CLEAR_RREGS to clear R8-R11 on some
+ * paths. It also stomps on RAX. Even more confusing, instead
+ * of storing RCX it stores RBP. WTF?
+ */
+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
+ BB_SKIP(RAX) | BB_SKIP(RCX)),
+ NS_MEM_FROM("ia32_badsys", "ia32_syscall",
+ partial_pt_regs,
+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11)),
+ NS_MEM("ia32_badsys", partial_pt_regs, 0),
+
+#ifdef CONFIG_AUDITSYSCALL
+ NS_MEM_FROM("int_with_check", "sysexit_audit", partial_pt_regs,
+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
+ BB_SKIP(RAX)),
+ NS_MEM_FROM("int_with_check", "ia32_cstar_target", partial_pt_regs,
+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
+ BB_SKIP(RAX) | BB_SKIP(RCX)),
+#endif
+ NS_MEM("int_with_check", no_memory, 0),
+
+ /* Various bits of code branch to int_ret_from_sys_call, with slightly
+ * different missing values in pt_regs.
+ */
+ NS_MEM_FROM("int_ret_from_sys_call", "ret_from_fork",
+ partial_pt_regs,
+ BB_SKIP(R11)),
+ NS_MEM_FROM("int_ret_from_sys_call", "stub_execve",
+ partial_pt_regs,
+ BB_SKIP(RAX) | BB_SKIP(RCX)),
+ NS_MEM_FROM("int_ret_from_sys_call", "stub_rt_sigreturn",
+ partial_pt_regs,
+ BB_SKIP(RAX) | BB_SKIP(RCX)),
+ NS_MEM_FROM("int_ret_from_sys_call", "kernel_execve",
+ partial_pt_regs,
+ BB_SKIP(RAX)),
+ NS_MEM_FROM("int_ret_from_sys_call", "ia32_syscall",
+ partial_pt_regs,
+ /* ia32_syscall only saves RDI through RCX. */
+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
+ BB_SKIP(RAX)),
+ NS_MEM_FROM("int_ret_from_sys_call", "ia32_sysenter_target",
+ partial_pt_regs,
+ /* ia32_sysenter_target uses CLEAR_RREGS to clear R8-R11 on
+ * some paths. It also stomps on RAX.
+ */
+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
+ BB_SKIP(RAX)),
+ NS_MEM_FROM("int_ret_from_sys_call", "ia32_cstar_target",
+ partial_pt_regs,
+ /* ia32_cstar_target uses CLEAR_RREGS to clear R8-R11 on some
+ * paths. It also stomps on RAX. Even more confusing, instead
+ * of storing RCX it stores RBP. WTF?
+ */
+ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
+ BB_SKIP(RAX) | BB_SKIP(RCX)),
+ NS_MEM_FROM("int_ret_from_sys_call", "ia32_badsys",
+ partial_pt_regs, BB_SKIP(RAX)),
+ NS_MEM("int_ret_from_sys_call", partial_pt_regs, 0),
+
+#ifdef CONFIG_PREEMPT
+ NS_MEM("retint_kernel", partial_pt_regs, BB_SKIP(RAX)),
+#endif /* CONFIG_PREEMPT */
+
+ NS_MEM("retint_careful", partial_pt_regs, BB_SKIP(RAX)),
+
+ /* Horrible hack: For a brand new x86_64 task, switch_to() branches to
+ * ret_from_fork with a totally different stack state from all the
+ * other tasks that come out of switch_to(). This non-standard state
+ * cannot be represented so just ignore the branch from switch_to() to
+ * ret_from_fork. Due to inlining and linker labels, switch_to() can
+ * appear as several different function labels, including schedule,
+ * context_switch and __sched_text_start.
+ */
+ NS_MEM_FROM("ret_from_fork", "schedule", no_memory, 0),
+ NS_MEM_FROM("ret_from_fork", "__schedule", no_memory, 0),
+ NS_MEM_FROM("ret_from_fork", "__sched_text_start", no_memory, 0),
+ NS_MEM_FROM("ret_from_fork", "context_switch", no_memory, 0),
+ NS_MEM("ret_from_fork", full_pt_regs, 0),
+
+ NS_MEM_FROM("ret_from_sys_call", "ret_from_fork",
+ partial_pt_regs,
+ BB_SKIP(R11)),
+ NS_MEM("ret_from_sys_call", partial_pt_regs, 0),
+
+ NS_MEM("retint_restore_args",
+ partial_pt_regs,
+ BB_SKIP(RAX) | BB_SKIP(RCX)),
+
+ NS_MEM("retint_swapgs",
+ partial_pt_regs,
+ BB_SKIP(RAX) | BB_SKIP(RCX)),
+
+ /* Now the cases that pass data in registers. We do not check any
+ * memory state for these cases.
+ */
+
+ NS_REG("bad_put_user",
+ all_regs, BB_SKIP(RBX)),
+
+ NS_REG("bad_get_user",
+ all_regs, BB_SKIP(RAX) | BB_SKIP(RDX)),
+
+ NS_REG("bad_to_user",
+ all_regs,
+ BB_SKIP(RAX) | BB_SKIP(RCX)),
+
+ NS_REG("ia32_ptregs_common",
+ all_regs,
+ 0),
+
+ NS_REG("copy_user_generic_unrolled",
+ all_regs,
+ BB_SKIP(RAX) | BB_SKIP(RCX)),
+
+ NS_REG("copy_user_generic_string",
+ all_regs,
+ BB_SKIP(RAX) | BB_SKIP(RCX)),
+
+ NS_REG("irq_return",
+ all_regs,
+ 0),
+
+ /* Finally the cases that pass data in both registers and memory.
+ */
+
+ NS("invalid_TSS", error_code, all_regs, 0, 0, 0),
+ NS("segment_not_present", error_code, all_regs, 0, 0, 0),
+ NS("alignment_check", error_code, all_regs, 0, 0, 0),
+ NS("page_fault", error_code, all_regs, 0, 0, 0),
+ NS("general_protection", error_code, all_regs, 0, 0, 0),
+ NS("error_entry", error_code_rax, all_regs, 0, BB_SKIP(RAX), -0x10),
+ NS("error_exit", error_code_full_pt_regs, no_regs, 0, 0, 0x30),
+ NS("common_interrupt", error_code, all_regs, 0, 0, -0x8),
+ NS("save_args", error_code, all_regs, 0, 0, -0x50),
+ NS("int3", no_memory, all_regs, 0, 0, -0x80),
+};
+
+static const char *bb_spurious[] = {
+ /* schedule */
+ "thread_return",
+ /* system_call */
+ "system_call_after_swapgs",
+ "system_call_fastpath",
+ "ret_from_sys_call",
+ "sysret_check",
+ "sysret_careful",
+ "sysret_signal",
+ "badsys",
+#ifdef CONFIG_AUDITSYSCALL
+ "auditsys",
+ "sysret_audit",
+#endif
+ "tracesys",
+ "int_ret_from_sys_call",
+ "int_with_check",
+ "int_careful",
+ "int_very_careful",
+ "int_signal",
+ "int_restore_rest",
+ /* common_interrupt */
+ "ret_from_intr",
+ "exit_intr",
+ "retint_with_reschedule",
+ "retint_check",
+ "retint_swapgs",
+ "retint_restore_args",
+ "restore_args",
+ "irq_return",
+ "bad_iret",
+ "retint_careful",
+ "retint_signal",
+#ifdef CONFIG_PREEMPT
+ "retint_kernel",
+#endif /* CONFIG_PREEMPT */
+ /* paranoid_exit */
+ "paranoid_swapgs",
+ "paranoid_restore",
+ "paranoid_userspace",
+ "paranoid_schedule",
+ /* error_entry */
+ "error_swapgs",
+ "error_sti",
+ "error_kernelspace",
+ /* nmi */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ "nmi_swapgs",
+ "nmi_restore",
+ "nmi_userspace",
+ "nmi_schedule",
+#endif
+ /* load_gs_index */
+ "gs_change",
+ "bad_gs",
+ /* ia32_sysenter_target */
+ "sysenter_do_call",
+ "sysenter_dispatch",
+ "sysexit_from_sys_call",
+#ifdef CONFIG_AUDITSYSCALL
+ "sysenter_auditsys",
+ "sysexit_audit",
+#endif
+ "sysenter_tracesys",
+ /* ia32_cstar_target */
+ "cstar_do_call",
+ "cstar_dispatch",
+ "sysretl_from_sys_call",
+#ifdef CONFIG_AUDITSYSCALL
+ "cstar_auditsys",
+ "sysretl_audit",
+#endif
+ "cstar_tracesys",
+ /* ia32_syscall */
+ "ia32_do_call",
+ "ia32_sysret",
+ "ia32_tracesys",
+#ifdef CONFIG_HIBERNATION
+ /* restore_image */
+ "loop",
+ "done",
+#endif /* CONFIG_HIBERNATION */
+#ifdef CONFIG_KPROBES
+ /* jprobe_return */
+ "jprobe_return_end",
+ /* kretprobe_trampoline_holder */
+ "kretprobe_trampoline",
+#endif /* CONFIG_KPROBES */
+#ifdef CONFIG_KEXEC
+ /* relocate_kernel */
+ "relocate_new_kernel",
+#endif /* CONFIG_KEXEC */
+#ifdef CONFIG_XEN
+ /* arch/i386/xen/xen-asm.S */
+ "xen_irq_enable_direct_end",
+ "xen_irq_disable_direct_end",
+ "xen_save_fl_direct_end",
+ "xen_restore_fl_direct_end",
+ "xen_iret_start_crit",
+ "iret_restore_end",
+ "xen_iret_end_crit",
+ "hyper_iret",
+#endif /* CONFIG_XEN */
+};
+
+static const char *bb_hardware_handlers[] = {
+ "system_call",
+ "common_interrupt",
+ "error_entry",
+ "debug",
+ "nmi",
+ "int3",
+ "double_fault",
+ "stack_segment",
+ "machine_check",
+ "kdb_call",
+};
+
+static int
+bb_hardware_pushed_arch(kdb_machreg_t rsp,
+ const struct kdb_activation_record *ar)
+{
+ /* x86_64 interrupt stacks are 16 byte aligned and you must get the
+	 * next rsp from the stack; it cannot be statically calculated. Do not
+ * include the word at rsp, it is pushed by hardware but is treated as
+ * a normal software return value.
+ *
+ * When an IST switch occurs (e.g. NMI) then the saved rsp points to
+ * another stack entirely. Assume that the IST stack is 16 byte
+ * aligned and just return the size of the hardware data on this stack.
+ * The stack unwind code will take care of the stack switch.
+ */
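+	/* The hardware exception frame on x86_64 is RIP, CS, RFLAGS, RSP
+	 * and SS, so the saved rsp is the fourth word above the current
+	 * rsp.
+	 */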
+ kdb_machreg_t saved_rsp = *((kdb_machreg_t *)rsp + 3);
+ int hardware_pushed = saved_rsp - rsp - KDB_WORD_SIZE;
+ if (hardware_pushed < 4 * KDB_WORD_SIZE ||
+ saved_rsp < ar->stack.logical_start ||
+ saved_rsp >= ar->stack.logical_end)
+ return 4 * KDB_WORD_SIZE;
+ else
+ return hardware_pushed;
+}
+
+static void
+bb_start_block0(void)
+{
+ bb_reg_code_set_value(BBRG_RAX, BBRG_RAX);
+ bb_reg_code_set_value(BBRG_RBX, BBRG_RBX);
+ bb_reg_code_set_value(BBRG_RCX, BBRG_RCX);
+ bb_reg_code_set_value(BBRG_RDX, BBRG_RDX);
+ bb_reg_code_set_value(BBRG_RDI, BBRG_RDI);
+ bb_reg_code_set_value(BBRG_RSI, BBRG_RSI);
+ bb_reg_code_set_value(BBRG_RBP, BBRG_RBP);
+ bb_reg_code_set_value(BBRG_RSP, BBRG_OSP);
+ bb_reg_code_set_value(BBRG_R8, BBRG_R8);
+ bb_reg_code_set_value(BBRG_R9, BBRG_R9);
+ bb_reg_code_set_value(BBRG_R10, BBRG_R10);
+ bb_reg_code_set_value(BBRG_R11, BBRG_R11);
+ bb_reg_code_set_value(BBRG_R12, BBRG_R12);
+ bb_reg_code_set_value(BBRG_R13, BBRG_R13);
+ bb_reg_code_set_value(BBRG_R14, BBRG_R14);
+ bb_reg_code_set_value(BBRG_R15, BBRG_R15);
+}
+
+/* x86_64 does not have a special case for __switch_to */
+
+static void
+bb_fixup_switch_to(char *p)
+{
+}
+
+static int
+bb_asmlinkage_arch(void)
+{
+ return strncmp(bb_func_name, "__down", 6) == 0 ||
+ strncmp(bb_func_name, "__up", 4) == 0 ||
+ strncmp(bb_func_name, "stub_", 5) == 0 ||
+ strcmp(bb_func_name, "ret_from_fork") == 0 ||
+ strcmp(bb_func_name, "ptregscall_common") == 0;
+}
+
+#else /* !CONFIG_X86_64 */
+
+/* Registers that can be used to pass parameters, in the order that parameters
+ * are passed.
+ */
+
+const static enum bb_reg_code
+bb_param_reg[] = {
+ BBRG_RAX,
+ BBRG_RDX,
+ BBRG_RCX,
+};
+
+const static enum bb_reg_code
+bb_preserved_reg[] = {
+ BBRG_RBX,
+ BBRG_RBP,
+ BBRG_RSP,
+ BBRG_RSI,
+ BBRG_RDI,
+};
+
+static const struct bb_mem_contains full_pt_regs[] = {
+ { 0x18, BBRG_RAX },
+ { 0x14, BBRG_RBP },
+ { 0x10, BBRG_RDI },
+ { 0x0c, BBRG_RSI },
+ { 0x08, BBRG_RDX },
+ { 0x04, BBRG_RCX },
+ { 0x00, BBRG_RBX },
+};
+static const struct bb_mem_contains no_memory[] = {
+};
+/* Hardware has already pushed an error_code on the stack. Use undefined just
+ * to set the initial stack offset.
+ */
+static const struct bb_mem_contains error_code[] = {
+ { 0x0, BBRG_UNDEFINED },
+};
+/* rbx already pushed */
+static const struct bb_mem_contains rbx_pushed[] = {
+ { 0x0, BBRG_RBX },
+};
+#ifdef CONFIG_MATH_EMULATION
+static const struct bb_mem_contains mem_fpu_reg_round[] = {
+ { 0xc, BBRG_RBP },
+ { 0x8, BBRG_RSI },
+ { 0x4, BBRG_RDI },
+ { 0x0, BBRG_RBX },
+};
+#endif /* CONFIG_MATH_EMULATION */
+
+static const struct bb_reg_contains all_regs[] = {
+ [BBRG_RAX] = { BBRG_RAX, 0 },
+ [BBRG_RBX] = { BBRG_RBX, 0 },
+ [BBRG_RCX] = { BBRG_RCX, 0 },
+ [BBRG_RDX] = { BBRG_RDX, 0 },
+ [BBRG_RDI] = { BBRG_RDI, 0 },
+ [BBRG_RSI] = { BBRG_RSI, 0 },
+ [BBRG_RBP] = { BBRG_RBP, 0 },
+ [BBRG_RSP] = { BBRG_OSP, 0 },
+};
+static const struct bb_reg_contains no_regs[] = {
+};
+#ifdef CONFIG_MATH_EMULATION
+static const struct bb_reg_contains reg_fpu_reg_round[] = {
+ [BBRG_RBP] = { BBRG_OSP, -0x4 },
+ [BBRG_RSP] = { BBRG_OSP, -0x10 },
+};
+#endif /* CONFIG_MATH_EMULATION */
+
+static struct bb_name_state bb_special_cases[] = {
+
+ /* First the cases that pass data only in memory. We do not check any
+ * register state for these cases.
+ */
+
+ /* Simple cases, no exceptions */
+ NS_MEM("check_userspace", full_pt_regs, 0),
+ NS_MEM("device_not_available_emulate", full_pt_regs, 0),
+ NS_MEM("ldt_ss", full_pt_regs, 0),
+ NS_MEM("no_singlestep", full_pt_regs, 0),
+ NS_MEM("restore_all", full_pt_regs, 0),
+ NS_MEM("restore_nocheck", full_pt_regs, 0),
+ NS_MEM("restore_nocheck_notrace", full_pt_regs, 0),
+ NS_MEM("ret_from_exception", full_pt_regs, 0),
+ NS_MEM("ret_from_fork", full_pt_regs, 0),
+ NS_MEM("ret_from_intr", full_pt_regs, 0),
+ NS_MEM("work_notifysig", full_pt_regs, 0),
+ NS_MEM("work_pending", full_pt_regs, 0),
+
+#ifdef CONFIG_PREEMPT
+ NS_MEM("resume_kernel", full_pt_regs, 0),
+#endif /* CONFIG_PREEMPT */
+
+ NS_MEM("common_interrupt", error_code, 0),
+ NS_MEM("error_code", error_code, 0),
+
+ NS_MEM("bad_put_user", rbx_pushed, 0),
+
+ NS_MEM_FROM("resume_userspace", "syscall_badsys",
+ full_pt_regs, BB_SKIP(RAX)),
+ NS_MEM_FROM("resume_userspace", "syscall_fault",
+ full_pt_regs, BB_SKIP(RAX)),
+ NS_MEM_FROM("resume_userspace", "syscall_trace_entry",
+ full_pt_regs, BB_SKIP(RAX)),
+ /* Too difficult to trace through the various vm86 functions for now.
+ * They are C functions that start off with some memory state, fiddle
+ * the registers then jmp directly to resume_userspace. For the
+ * moment, just assume that they are valid and do no checks.
+ */
+ NS_FROM("resume_userspace", "do_int",
+ no_memory, no_regs, 0, 0, 0),
+ NS_FROM("resume_userspace", "do_sys_vm86",
+ no_memory, no_regs, 0, 0, 0),
+ NS_FROM("resume_userspace", "handle_vm86_fault",
+ no_memory, no_regs, 0, 0, 0),
+ NS_FROM("resume_userspace", "handle_vm86_trap",
+ no_memory, no_regs, 0, 0, 0),
+ NS_MEM("resume_userspace", full_pt_regs, 0),
+
+ NS_MEM_FROM("syscall_badsys", "ia32_sysenter_target",
+ full_pt_regs, BB_SKIP(RBP)),
+ NS_MEM("syscall_badsys", full_pt_regs, 0),
+
+ NS_MEM_FROM("syscall_call", "syscall_trace_entry",
+ full_pt_regs, BB_SKIP(RAX)),
+ NS_MEM("syscall_call", full_pt_regs, 0),
+
+ NS_MEM_FROM("syscall_exit", "syscall_trace_entry",
+ full_pt_regs, BB_SKIP(RAX)),
+ NS_MEM("syscall_exit", full_pt_regs, 0),
+
+ NS_MEM_FROM("syscall_exit_work", "ia32_sysenter_target",
+ full_pt_regs, BB_SKIP(RAX) | BB_SKIP(RBP)),
+ NS_MEM_FROM("syscall_exit_work", "system_call",
+ full_pt_regs, BB_SKIP(RAX)),
+ NS_MEM("syscall_exit_work", full_pt_regs, 0),
+
+ NS_MEM_FROM("syscall_trace_entry", "ia32_sysenter_target",
+ full_pt_regs, BB_SKIP(RBP)),
+ NS_MEM_FROM("syscall_trace_entry", "system_call",
+ full_pt_regs, BB_SKIP(RAX)),
+ NS_MEM("syscall_trace_entry", full_pt_regs, 0),
+
+ /* Now the cases that pass data in registers. We do not check any
+ * memory state for these cases.
+ */
+
+ NS_REG("syscall_fault", all_regs, 0),
+
+ NS_REG("bad_get_user", all_regs,
+ BB_SKIP(RAX) | BB_SKIP(RDX)),
+
+ /* Finally the cases that pass data in both registers and memory.
+ */
+
+ /* This entry is redundant now because bb_fixup_switch_to() hides the
+ * jmp __switch_to case, however the entry is left here as
+ * documentation.
+ *
+ * NS("__switch_to", no_memory, no_regs, 0, 0, 0),
+ */
+
+ NS("iret_exc", no_memory, all_regs, 0, 0, 0x20),
+
+#ifdef CONFIG_MATH_EMULATION
+ NS("fpu_reg_round", mem_fpu_reg_round, reg_fpu_reg_round, 0, 0, 0),
+#endif /* CONFIG_MATH_EMULATION */
+};
+
+static const char *bb_spurious[] = {
+ /* ret_from_exception */
+ "ret_from_intr",
+ "check_userspace",
+ "resume_userspace",
+ /* resume_kernel */
+#ifdef CONFIG_PREEMPT
+ "need_resched",
+#endif /* CONFIG_PREEMPT */
+ /* ia32_sysenter_target */
+ "sysenter_past_esp",
+ /* system_call */
+ "no_singlestep",
+ "syscall_call",
+ "syscall_exit",
+ "restore_all",
+ "restore_nocheck",
+ "restore_nocheck_notrace",
+ "ldt_ss",
+ /* do not include iret_exc, it is in a .fixup section */
+ /* work_pending */
+ "work_resched",
+ "work_notifysig",
+#ifdef CONFIG_VM86
+ "work_notifysig_v86",
+#endif /* CONFIG_VM86 */
+ /* page_fault */
+ "error_code",
+ /* device_not_available */
+ "device_not_available_emulate",
+ /* debug */
+ "debug_esp_fix_insn",
+ "debug_stack_correct",
+ /* nmi */
+ "nmi_stack_correct",
+ "nmi_stack_fixup",
+ "nmi_debug_stack_check",
+ "nmi_espfix_stack",
+#ifdef CONFIG_HIBERNATION
+ /* restore_image */
+ "copy_loop",
+ "done",
+#endif /* CONFIG_HIBERNATION */
+#ifdef CONFIG_KPROBES
+ /* jprobe_return */
+ "jprobe_return_end",
+#endif /* CONFIG_KPROBES */
+#ifdef CONFIG_KEXEC
+ /* relocate_kernel */
+ "relocate_new_kernel",
+#endif /* CONFIG_KEXEC */
+#ifdef CONFIG_MATH_EMULATION
+ /* assorted *.S files in arch/i386/math_emu */
+ "Denorm_done",
+ "Denorm_shift_more_than_32",
+ "Denorm_shift_more_than_63",
+ "Denorm_shift_more_than_64",
+ "Do_unmasked_underflow",
+ "Exp_not_underflow",
+ "fpu_Arith_exit",
+ "fpu_reg_round",
+ "fpu_reg_round_signed_special_exit",
+ "fpu_reg_round_special_exit",
+ "L_accum_done",
+ "L_accum_loaded",
+ "L_accum_loop",
+ "L_arg1_larger",
+ "L_bugged",
+ "L_bugged_1",
+ "L_bugged_2",
+ "L_bugged_3",
+ "L_bugged_4",
+ "L_bugged_denorm_486",
+ "L_bugged_round24",
+ "L_bugged_round53",
+ "L_bugged_round64",
+ "LCheck_24_round_up",
+ "LCheck_53_round_up",
+ "LCheck_Round_Overflow",
+ "LCheck_truncate_24",
+ "LCheck_truncate_53",
+ "LCheck_truncate_64",
+ "LDenormal_adj_exponent",
+ "L_deNormalised",
+ "LDo_24_round_up",
+ "LDo_2nd_32_bits",
+ "LDo_2nd_div",
+ "LDo_3rd_32_bits",
+ "LDo_3rd_div",
+ "LDo_53_round_up",
+ "LDo_64_round_up",
+ "L_done",
+ "LDo_truncate_24",
+ "LDown_24",
+ "LDown_53",
+ "LDown_64",
+ "L_entry_bugged",
+ "L_error_exit",
+ "L_exactly_32",
+ "L_exception_exit",
+ "L_exit",
+ "L_exit_nuo_valid",
+ "L_exit_nuo_zero",
+ "L_exit_valid",
+ "L_extent_zero",
+ "LFirst_div_done",
+ "LFirst_div_not_1",
+ "L_Full_Division",
+ "LGreater_Half_24",
+ "LGreater_Half_53",
+ "LGreater_than_1",
+ "LLess_than_1",
+ "L_Make_denorm",
+ "L_more_31_no_low",
+ "L_more_63_no_low",
+ "L_more_than_31",
+ "L_more_than_63",
+ "L_more_than_64",
+ "L_more_than_65",
+ "L_more_than_95",
+ "L_must_be_zero",
+ "L_n_exit",
+ "L_no_adjust",
+ "L_no_bit_lost",
+ "L_no_overflow",
+ "L_no_precision_loss",
+ "L_Normalised",
+ "L_norm_bugged",
+ "L_n_shift_1",
+ "L_nuo_shift_1",
+ "L_overflow",
+ "L_precision_lost_down",
+ "L_precision_lost_up",
+ "LPrevent_2nd_overflow",
+ "LPrevent_3rd_overflow",
+ "LPseudoDenormal",
+ "L_Re_normalise",
+ "LResult_Normalised",
+ "L_round",
+ "LRound_large",
+ "LRound_nearest_24",
+ "LRound_nearest_53",
+ "LRound_nearest_64",
+ "LRound_not_small",
+ "LRound_ovfl",
+ "LRound_precision",
+ "LRound_prep",
+ "L_round_the_result",
+ "LRound_To_24",
+ "LRound_To_53",
+ "LRound_To_64",
+ "LSecond_div_done",
+ "LSecond_div_not_1",
+ "L_shift_1",
+ "L_shift_32",
+ "L_shift_65_nc",
+ "L_shift_done",
+ "Ls_less_than_32",
+ "Ls_more_than_63",
+ "Ls_more_than_95",
+ "L_Store_significand",
+ "L_subtr",
+ "LTest_over",
+ "LTruncate_53",
+ "LTruncate_64",
+ "L_underflow",
+ "L_underflow_to_zero",
+ "LUp_24",
+ "LUp_53",
+ "LUp_64",
+ "L_zero",
+ "Normalise_result",
+ "Signal_underflow",
+ "sqrt_arg_ge_2",
+ "sqrt_get_more_precision",
+ "sqrt_more_prec_large",
+ "sqrt_more_prec_ok",
+ "sqrt_more_prec_small",
+ "sqrt_near_exact",
+ "sqrt_near_exact_large",
+ "sqrt_near_exact_ok",
+ "sqrt_near_exact_small",
+ "sqrt_near_exact_x",
+ "sqrt_prelim_no_adjust",
+ "sqrt_round_result",
+ "sqrt_stage_2_done",
+ "sqrt_stage_2_error",
+ "sqrt_stage_2_finish",
+ "sqrt_stage_2_positive",
+ "sqrt_stage_3_error",
+ "sqrt_stage_3_finished",
+ "sqrt_stage_3_no_error",
+ "sqrt_stage_3_positive",
+ "Unmasked_underflow",
+ "xExp_not_underflow",
+#endif /* CONFIG_MATH_EMULATION */
+};
+
+static const char *bb_hardware_handlers[] = {
+ "ret_from_exception",
+ "system_call",
+ "work_pending",
+ "syscall_fault",
+ "page_fault",
+ "coprocessor_error",
+ "simd_coprocessor_error",
+ "device_not_available",
+ "debug",
+ "nmi",
+ "int3",
+ "overflow",
+ "bounds",
+ "invalid_op",
+ "coprocessor_segment_overrun",
+ "invalid_TSS",
+ "segment_not_present",
+ "stack_segment",
+ "general_protection",
+ "alignment_check",
+ "kdb_call",
+ "divide_error",
+ "machine_check",
+ "spurious_interrupt_bug",
+};
+
+static int
+bb_hardware_pushed_arch(kdb_machreg_t rsp,
+ const struct kdb_activation_record *ar)
+{
+ return (2 * KDB_WORD_SIZE);
+}
+
+static void
+bb_start_block0(void)
+{
+ bb_reg_code_set_value(BBRG_RAX, BBRG_RAX);
+ bb_reg_code_set_value(BBRG_RBX, BBRG_RBX);
+ bb_reg_code_set_value(BBRG_RCX, BBRG_RCX);
+ bb_reg_code_set_value(BBRG_RDX, BBRG_RDX);
+ bb_reg_code_set_value(BBRG_RDI, BBRG_RDI);
+ bb_reg_code_set_value(BBRG_RSI, BBRG_RSI);
+ bb_reg_code_set_value(BBRG_RBP, BBRG_RBP);
+ bb_reg_code_set_value(BBRG_RSP, BBRG_OSP);
+}
+
+/* The i386 code that switches stack in a context switch is an extremely
+ * special case. It saves the rip pointing to a label that is not otherwise
+ * referenced, saves the current rsp then pushes a word. The magic code that
+ * resumes the new task picks up the saved rip and rsp, effectively referencing
+ * a label that otherwise is not used and ignoring the pushed word.
+ *
+ * The simplest way to handle this very strange case is to recognise jmp
+ * address <__switch_to> and treat it as a popfl instruction. This avoids
+ * terminating the block on this jmp and removes one word from the stack state,
+ * which is the end effect of all the magic code.
+ *
+ * Called with the instruction line, starting after the first ':'.
+ */
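+/* Illustration (hypothetical addresses): a disassembled line such as
+ *
+ *	0xc0119f2c <schedule+0x2bc>:	jmp    0xc0109a40 <__switch_to>
+ *
+ * has the text after the first ':' rewritten in place to read "popfl", so
+ * pass 1 neither terminates the block here nor follows the jmp, and
+ * treating it as popfl removes the pushed word from the tracked stack
+ * state.
+ */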
+
+static void
+bb_fixup_switch_to(char *p)
+{
+ char *p1 = p;
+ p += strspn(p, " \t"); /* start of instruction */
+ if (strncmp(p, "jmp", 3))
+ return;
+ p += strcspn(p, " \t"); /* end of instruction */
+ p += strspn(p, " \t"); /* start of address */
+ p += strcspn(p, " \t"); /* end of address */
+ p += strspn(p, " \t"); /* start of comment */
+ if (strcmp(p, "<__switch_to>") == 0)
+ strcpy(p1, "popfl");
+}
+
+static int
+bb_asmlinkage_arch(void)
+{
+ return strcmp(bb_func_name, "ret_from_exception") == 0 ||
+ strcmp(bb_func_name, "syscall_trace_entry") == 0;
+}
+
+#endif /* CONFIG_X86_64 */
+
+
+/*============================================================================*/
+/* */
+/* Common code and data. */
+/* */
+/*============================================================================*/
+
+
+/* Tracking registers by decoding the instructions is quite a bit harder than
+ * doing the same tracking using compiler generated information. Register
+ * contents can remain in the same register, they can be copied to other
+ * registers, they can be stored on stack or they can be modified/overwritten.
+ * At any one time, there are 0 or more copies of the original value that was
+ * supplied in each register on input to the current function. If a register
+ * exists in multiple places, one copy of that register is the master version,
+ * the others are temporary copies which may or may not be destroyed before the
+ * end of the function.
+ *
+ * The compiler knows which copy of a register is the master and which are
+ * temporary copies, which makes it relatively easy to track register contents
+ * as they are saved and restored. Without that compiler based knowledge, this
+ * code has to track _every_ possible copy of each register, simply because we
+ * do not know which is the master copy and which are temporary copies which
+ * may be destroyed later.
+ *
+ * It gets worse: registers that contain parameters can be copied to other
+ * registers which are then saved on stack in a lower level function. Also the
+ * stack pointer may be held in multiple registers (typically RSP and RBP)
+ * which contain different offsets from the base of the stack on entry to this
+ * function. All of which means that we have to track _all_ register
+ * movements, or at least as much as possible.
+ *
+ * Start with the basic block that contains the start of the function; by
+ * definition all registers contain their initial value. Track each
+ * instruction's effect on register contents; this includes reading from a
+ * parameter register before any write to that register, IOW the register
+ * really does contain a parameter. The register state is represented by a
+ * dynamically sized array with each entry containing :-
+ *
+ * Register name
+ * Location it is copied to (another register or stack + offset)
+ *
+ * Besides the register tracking array, we track which parameter registers are
+ * read before being written, to determine how many parameters are passed in
+ * registers. We also track which registers contain stack pointers, including
+ * their offset from the original stack pointer on entry to the function.
+ *
+ * At each exit from the current basic block (via JMP instruction or drop
+ * through), the register state is cloned to form the state on input to the
+ * target basic block and the target is marked for processing using this state.
+ * When there are multiple ways to enter a basic block (e.g. several JMP
+ * instructions referencing the same target) then there will be multiple sets
+ * of register state to form the "input" for that basic block; there is no
+ * guarantee that all paths to that block will have the same register state.
+ *
+ * As each target block is processed, all the known sets of register state are
+ * merged to form a suitable subset of the state which agrees with all the
+ * inputs. The most common case is where one path to this block copies a
+ * register to another register but another path does not; therefore the copy
+ * is only a temporary and should not be propagated into this block.
+ *
+ * If the target block already has an input state from the current transfer
+ * point and the new input state is identical to the previous input state then
+ * we have reached a steady state for the arc from the current location to the
+ * target block. Therefore there is no need to process the target block again.
+ *
+ * The steps of "process a block, create state for target block(s), pick a new
+ * target block, merge state for target block, process target block" will
+ * continue until all the state changes have propagated all the way down the
+ * basic block tree, including round any cycles in the tree. The merge step
+ * only deletes tracking entries from the input state(s), it never adds a
+ * tracking entry. Therefore the overall algorithm is guaranteed to converge
+ * to a steady state; the worst possible case is that every tracking entry into
+ * a block is deleted, which will result in an empty output state.
+ *
+ * As each instruction is decoded, it is checked to see if this is the point at
+ * which execution left this function. This can be a call to another function
+ * (actually the return address to this function) or the instruction which
+ * was about to be executed when an interrupt occurred (including an oops).
+ * Save the register state at this point.
+ *
+ * We always know what the registers contain when execution left this function.
+ * For an interrupt, the registers are in struct pt_regs. For a call to
+ * another function, we have already deduced the register state on entry to the
+ * other function by unwinding to the start of that function. Given the
+ * register state on exit from this function plus the known register contents
+ * on entry to the next function, we can determine the stack pointer value on
+ * input to this function. That in turn lets us calculate the address of input
+ * registers that have been stored on stack, giving us the input parameters.
+ * Finally the stack pointer gives us the return address which is the exit
+ * point from the calling function, repeat the unwind process on that function.
+ *
+ * The data that tracks which registers contain input parameters is function
+ * global, not local to any basic block. To determine which input registers
+ * contain parameters, we have to decode the entire function. Otherwise an
+ * exit early in the function might not have read any parameters yet.
+ */
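+/* A small worked illustration (hypothetical x86_64 code):
+ *
+ *	push   %rbp		rsp = osp-0x8, *(osp-0x8) = rbp
+ *	mov    %rsp,%rbp	rbp = osp-0x8
+ *	mov    %rdi,%rbx	rbx = rdi; rdi was read before any write,
+ *				so it carries at least one parameter
+ *	push   %rbx		rsp = osp-0x10, *(osp-0x10) = rdi
+ *
+ * After these instructions the original rdi value is tracked in three
+ * places (rdi, rbx and *(osp-0x10)) and both rsp and rbp are known to hold
+ * stack pointers at fixed offsets from the original stack pointer (osp).
+ */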
+
+/* Record memory contents in terms of the values that were passed to this
+ * function, IOW track which memory locations contain an input value. A memory
+ * location's contents can be undefined, it can contain an input register value
+ * or it can contain an offset from the original stack pointer.
+ *
+ * This structure is used to record register contents that have been stored in
+ * memory. Location (BBRG_OSP + 'offset_address') contains the input value
+ * from register 'value'. When 'value' is BBRG_OSP then offset_value contains
+ * the offset from the original stack pointer that was stored in this memory
+ * location. When 'value' is not BBRG_OSP then the memory location contains
+ * the original contents of an input register and offset_value is ignored.
+ *
+ * An input register 'value' can be stored in more than one register and/or in
+ * more than one memory location.
+ */
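+/* For example, 'push %rsi' at the start of an x86_64 function (where
+ * KDB_WORD_SIZE is 8) would record offset_address == -0x8 with value ==
+ * BBRG_RSI, while 'push %rsp' would record offset_address == -0x8 with
+ * value == BBRG_OSP and offset_value == 0.
+ */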
+
+struct bb_memory_contains
+{
+ short offset_address;
+ enum bb_reg_code value: 8;
+ short offset_value;
+};
+
+/* Track the register state in each basic block. */
+
+struct bb_reg_state
+{
+ /* Indexed by register value 'reg - BBRG_RAX' */
+ struct bb_reg_contains contains[KDB_INT_REGISTERS];
+ int ref_count;
+ int mem_count;
+ /* dynamic size for memory locations, see mem_count */
+ struct bb_memory_contains memory[0];
+};
+
+static struct bb_reg_state *bb_reg_state, *bb_exit_state;
+static int bb_reg_state_max, bb_reg_params, bb_memory_params;
+
+struct bb_actual
+{
+ bfd_vma value;
+ int valid;
+};
+
+/* Contains the actual hex value of a register, plus a valid bit. Indexed by
+ * register value 'reg - BBRG_RAX'
+ */
+static struct bb_actual bb_actual[KDB_INT_REGISTERS];
+
+static bfd_vma bb_func_start, bb_func_end;
+static bfd_vma bb_common_interrupt, bb_error_entry, bb_ret_from_intr,
+ bb_thread_return, bb_sync_regs, bb_save_v86_state,
+ bb__sched_text_start, bb__sched_text_end,
+ bb_save_args, bb_save_rest, bb_save_paranoid;
+
+/* Record jmp instructions, both conditional and unconditional. These form the
+ * arcs between the basic blocks. This is also used to record the state when
+ * one block drops through into the next.
+ *
+ * A bb can have multiple associated bb_jmp entries, one for each jcc
+ * instruction plus at most one bb_jmp for the drop through case. If a bb
+ * drops through to the next bb then the drop through bb_jmp entry will be the
+ * last entry in the set of bb_jmp's that are associated with the bb. This is
+ * enforced by the fact that jcc entries are added during the disassembly phase
+ * of pass 1, the drop through entries are added near the end of pass 1.
+ *
+ * At address 'from' in this block, we have a jump to address 'to'. The
+ * register state at 'from' is copied to the target block.
+ */
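+/* For example, a block ending in 'jne <target>' gets one bb_jmp for the
+ * taken arc (drop_through 0) during disassembly, plus a second bb_jmp with
+ * drop_through 1 for the fall through into the next block, added near the
+ * end of pass 1.
+ */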
+
+struct bb_jmp
+{
+ bfd_vma from;
+ bfd_vma to;
+ struct bb_reg_state *state;
+ unsigned int drop_through: 1;
+};
+
+struct bb
+{
+ bfd_vma start;
+ /* The end address of a basic block is sloppy. It can be the first
+ * byte of the last instruction in the block or it can be the last byte
+ * of the block.
+ */
+ bfd_vma end;
+ unsigned int changed: 1;
+ unsigned int drop_through: 1;
+};
+
+static struct bb **bb_list, *bb_curr;
+static int bb_max, bb_count;
+
+static struct bb_jmp *bb_jmp_list;
+static int bb_jmp_max, bb_jmp_count;
+
+/* Add a new bb entry to the list. This does an insert sort. */
+
+static struct bb *
+bb_new(bfd_vma order)
+{
+ int i, j;
+ struct bb *bb, *p;
+ if (bb_giveup)
+ return NULL;
+ if (bb_count == bb_max) {
+ struct bb **bb_list_new;
+ bb_max += 10;
+ bb_list_new = debug_kmalloc(bb_max*sizeof(*bb_list_new),
+ GFP_ATOMIC);
+ if (!bb_list_new) {
+ kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
+ bb_giveup = 1;
+ return NULL;
+ }
+ memcpy(bb_list_new, bb_list, bb_count*sizeof(*bb_list));
+ debug_kfree(bb_list);
+ bb_list = bb_list_new;
+ }
+ bb = debug_kmalloc(sizeof(*bb), GFP_ATOMIC);
+ if (!bb) {
+ kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
+ bb_giveup = 1;
+ return NULL;
+ }
+ memset(bb, 0, sizeof(*bb));
+ for (i = 0; i < bb_count; ++i) {
+ p = bb_list[i];
+ if ((p->start && p->start > order) ||
+ (p->end && p->end > order))
+ break;
+ }
+ for (j = bb_count-1; j >= i; --j)
+ bb_list[j+1] = bb_list[j];
+ bb_list[i] = bb;
+ ++bb_count;
+ return bb;
+}
+
+/* Add a new bb_jmp entry to the list. This list is not sorted. */
+
+static struct bb_jmp *
+bb_jmp_new(bfd_vma from, bfd_vma to, unsigned int drop_through)
+{
+ struct bb_jmp *bb_jmp;
+ if (bb_giveup)
+ return NULL;
+ if (bb_jmp_count == bb_jmp_max) {
+ struct bb_jmp *bb_jmp_list_new;
+ bb_jmp_max += 10;
+ bb_jmp_list_new =
+ debug_kmalloc(bb_jmp_max*sizeof(*bb_jmp_list_new),
+ GFP_ATOMIC);
+ if (!bb_jmp_list_new) {
+ kdb_printf("\n\n%s: out of debug_kmalloc\n",
+ __FUNCTION__);
+ bb_giveup = 1;
+ return NULL;
+ }
+ memcpy(bb_jmp_list_new, bb_jmp_list,
+ bb_jmp_count*sizeof(*bb_jmp_list));
+ debug_kfree(bb_jmp_list);
+ bb_jmp_list = bb_jmp_list_new;
+ }
+ bb_jmp = bb_jmp_list + bb_jmp_count++;
+ bb_jmp->from = from;
+ bb_jmp->to = to;
+ bb_jmp->drop_through = drop_through;
+ bb_jmp->state = NULL;
+ return bb_jmp;
+}
+
+static void
+bb_delete(int i)
+{
+ struct bb *bb = bb_list[i];
+ memcpy(bb_list+i, bb_list+i+1, (bb_count-i-1)*sizeof(*bb_list));
+ bb_list[--bb_count] = NULL;
+ debug_kfree(bb);
+}
+
+static struct bb *
+bb_add(bfd_vma start, bfd_vma end)
+{
+ int i;
+ struct bb *bb;
+ /* Ignore basic blocks whose start address is outside the current
+ * function. These occur for call instructions and for tail recursion.
+ */
+ if (start &&
+ (start < bb_func_start || start >= bb_func_end))
+ return NULL;
+ for (i = 0; i < bb_count; ++i) {
+ bb = bb_list[i];
+ if ((start && bb->start == start) ||
+ (end && bb->end == end))
+ return bb;
+ }
+ bb = bb_new(start ? start : end);
+ if (bb) {
+ bb->start = start;
+ bb->end = end;
+ }
+ return bb;
+}
+
+static struct bb_jmp *
+bb_jmp_add(bfd_vma from, bfd_vma to, unsigned int drop_through)
+{
+ int i;
+ struct bb_jmp *bb_jmp;
+ for (i = 0, bb_jmp = bb_jmp_list; i < bb_jmp_count; ++i, ++bb_jmp) {
+ if (bb_jmp->from == from &&
+ bb_jmp->to == to &&
+ bb_jmp->drop_through == drop_through)
+ return bb_jmp;
+ }
+ bb_jmp = bb_jmp_new(from, to, drop_through);
+ return bb_jmp;
+}
+
+static unsigned long bb_curr_addr, bb_exit_addr;
+static char bb_buffer[256]; /* A bit too big to go on stack */
+
+/* Computed jmp uses 'jmp *addr(,%reg,[48])' where 'addr' is the start of a
+ * table of addresses that point into the current function. Run the table and
+ * generate bb starts for each target address plus a bb_jmp from this address
+ * to the target address.
+ *
+ * Only called for 'jmp' instructions, with the pointer starting at 'jmp'.
+ */
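+/* For example (hypothetical i386 disassembly of a switch statement, where
+ * KDB_WORD_SIZE is 4):
+ *
+ *	jmp    *0xc0234560(,%eax,4)
+ *
+ * The table at 0xc0234560 is read one word at a time; each word that
+ * points inside the current function becomes a bb start plus a bb_jmp arc
+ * from this jmp, and the scan stops at the first word that falls outside
+ * the function.
+ */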
+
+static void
+bb_pass1_computed_jmp(char *p)
+{
+ unsigned long table, scale;
+ kdb_machreg_t addr;
+	struct bb *bb;
+ p += strcspn(p, " \t"); /* end of instruction */
+ p += strspn(p, " \t"); /* start of address */
+ if (*p++ != '*')
+ return;
+ table = simple_strtoul(p, &p, 0);
+ if (strncmp(p, "(,%", 3) != 0)
+ return;
+ p += 3;
+ p += strcspn(p, ","); /* end of reg */
+ if (*p++ != ',')
+ return;
+ scale = simple_strtoul(p, &p, 0);
+ if (scale != KDB_WORD_SIZE || strcmp(p, ")"))
+ return;
+ while (!bb_giveup) {
+ if (kdb_getword(&addr, table, sizeof(addr)))
+ return;
+ if (addr < bb_func_start || addr >= bb_func_end)
+ return;
+ bb = bb_add(addr, 0);
+ if (bb)
+ bb_jmp_add(bb_curr_addr, addr, 0);
+ table += KDB_WORD_SIZE;
+ }
+}
+
+/* Pass 1, identify the start and end of each basic block */
+
+static int
+bb_dis_pass1(PTR file, const char *fmt, ...)
+{
+ int l = strlen(bb_buffer);
+ char *p;
+ va_list ap;
+ va_start(ap, fmt);
+ vsnprintf(bb_buffer + l, sizeof(bb_buffer) - l, fmt, ap);
+ va_end(ap);
+ if ((p = strchr(bb_buffer, '\n'))) {
+ *p = '\0';
+		/* ret[q], iret[q], sysexit, sysret, ud2a or jmp[q] end a
+		 * block, as does a call to a function marked noret.
+ */
+ p = bb_buffer;
+ p += strcspn(p, ":");
+ if (*p++ == ':') {
+ bb_fixup_switch_to(p);
+ p += strspn(p, " \t"); /* start of instruction */
+ if (strncmp(p, "ret", 3) == 0 ||
+ strncmp(p, "iret", 4) == 0 ||
+ strncmp(p, "sysexit", 7) == 0 ||
+ strncmp(p, "sysret", 6) == 0 ||
+ strncmp(p, "ud2a", 4) == 0 ||
+ strncmp(p, "jmp", 3) == 0) {
+ if (strncmp(p, "jmp", 3) == 0)
+ bb_pass1_computed_jmp(p);
+ bb_add(0, bb_curr_addr);
+			}
+ if (strncmp(p, "call", 4) == 0) {
+ strsep(&p, " \t"); /* end of opcode */
+ if (p)
+ p += strspn(p, " \t"); /* operand(s) */
+ if (p && strchr(p, '<')) {
+ p = strchr(p, '<') + 1;
+ *strchr(p, '>') = '\0';
+ if (bb_noret(p))
+ bb_add(0, bb_curr_addr);
+ }
+			}
+ }
+ bb_buffer[0] = '\0';
+ }
+ return 0;
+}
+
+static void
+bb_printaddr_pass1(bfd_vma addr, disassemble_info *dip)
+{
+ kdb_symtab_t symtab;
+ unsigned int offset;
+	struct bb *bb;
+ /* disasm only calls the printaddr routine for the target of jmp, loop
+ * or call instructions, i.e. the start of a basic block. call is
+ * ignored by bb_add because the target address is outside the current
+ * function.
+ */
+ dip->fprintf_func(dip->stream, "0x%lx", addr);
+ kdbnearsym(addr, &symtab);
+ if (symtab.sym_name) {
+ dip->fprintf_func(dip->stream, " <%s", symtab.sym_name);
+ if ((offset = addr - symtab.sym_start))
+ dip->fprintf_func(dip->stream, "+0x%x", offset);
+ dip->fprintf_func(dip->stream, ">");
+ }
+ bb = bb_add(addr, 0);
+ if (bb)
+ bb_jmp_add(bb_curr_addr, addr, 0);
+}
+
+static void
+bb_pass1(void)
+{
+ int i;
+ unsigned long addr;
+ struct bb *bb;
+ struct bb_jmp *bb_jmp;
+
+	if (KDB_DEBUG(BB) || KDB_DEBUG(BB_SUMM))
+ kdb_printf("%s: func_name %s func_start " kdb_bfd_vma_fmt0
+ " func_end " kdb_bfd_vma_fmt0 "\n",
+ __FUNCTION__,
+ bb_func_name,
+ bb_func_start,
+ bb_func_end);
+ kdb_di.fprintf_func = bb_dis_pass1;
+ kdb_di.print_address_func = bb_printaddr_pass1;
+
+ bb_add(bb_func_start, 0);
+ for (bb_curr_addr = bb_func_start;
+ bb_curr_addr < bb_func_end;
+ ++bb_curr_addr) {
+ unsigned char c;
+ if (kdb_getarea(c, bb_curr_addr)) {
+ kdb_printf("%s: unreadable function code at ",
+ __FUNCTION__);
+ kdb_symbol_print(bb_curr_addr, NULL, KDB_SP_DEFAULT);
+ kdb_printf(", giving up\n");
+ bb_giveup = 1;
+ return;
+ }
+ }
+ for (addr = bb_func_start; addr < bb_func_end; ) {
+ bb_curr_addr = addr;
+ addr += kdba_id_printinsn(addr, &kdb_di);
+ kdb_di.fprintf_func(NULL, "\n");
+ }
+ if (bb_giveup)
+ goto out;
+
+ /* Special case: a block consisting of a single instruction which is
+	 * both the target of a jmp and an ending instruction, so we
+ * add two blocks using the same address, one as a start and one as an
+ * end, in no guaranteed order. The end must be ordered after the
+ * start.
+ */
+ for (i = 0; i < bb_count-1; ++i) {
+ struct bb *bb1 = bb_list[i], *bb2 = bb_list[i+1];
+ if (bb1->end && bb1->end == bb2->start) {
+ bb = bb_list[i+1];
+ bb_list[i+1] = bb_list[i];
+ bb_list[i] = bb;
+ }
+ }
+
+ /* Some bb have a start address, some have an end address. Collapse
+ * them into entries that have both start and end addresses. The first
+ * entry is guaranteed to have a start address.
+ */
+ for (i = 0; i < bb_count-1; ++i) {
+ struct bb *bb1 = bb_list[i], *bb2 = bb_list[i+1];
+ if (bb1->end)
+ continue;
+ if (bb2->start) {
+ bb1->end = bb2->start - 1;
+ bb1->drop_through = 1;
+ bb_jmp_add(bb1->end, bb2->start, 1);
+ } else {
+ bb1->end = bb2->end;
+ bb_delete(i+1);
+ }
+ }
+ bb = bb_list[bb_count-1];
+ if (!bb->end)
+ bb->end = bb_func_end - 1;
+
+ /* It would be nice to check that all bb have a valid start and end
+ * address but there is just too much garbage code in the kernel to do
+ * that check. Aligned functions in assembler code mean that there is
+ * space between the end of one function and the start of the next and
+ * that space contains previous code from the assembler's buffers. It
+ * looks like dead code with nothing that branches to it, so no start
+ * address. do_sys_vm86() ends with 'jmp resume_userspace' which the C
+ * compiler does not know about so gcc appends the normal exit code,
+ * again nothing branches to this dangling code.
+ *
+ * The best we can do is delete bb entries with no start address.
+ */
+ for (i = 0; i < bb_count; ++i) {
+ struct bb *bb = bb_list[i];
+ if (!bb->start)
+ bb_delete(i--);
+ }
+ for (i = 0; i < bb_count; ++i) {
+ struct bb *bb = bb_list[i];
+ if (!bb->end) {
+ kdb_printf("%s: incomplete bb state\n", __FUNCTION__);
+ bb_giveup = 1;
+ goto debug;
+ }
+ }
+
+out:
+ if (!KDB_DEBUG(BB))
+ return;
+debug:
+ kdb_printf("%s: end\n", __FUNCTION__);
+ for (i = 0; i < bb_count; ++i) {
+ bb = bb_list[i];
+ kdb_printf(" bb[%d] start "
+ kdb_bfd_vma_fmt0
+ " end " kdb_bfd_vma_fmt0
+ " drop_through %d",
+ i, bb->start, bb->end, bb->drop_through);
+ kdb_printf("\n");
+ }
+ for (i = 0; i < bb_jmp_count; ++i) {
+ bb_jmp = bb_jmp_list + i;
+ kdb_printf(" bb_jmp[%d] from "
+ kdb_bfd_vma_fmt0
+ " to " kdb_bfd_vma_fmt0
+ " drop_through %d\n",
+ i, bb_jmp->from, bb_jmp->to, bb_jmp->drop_through);
+ }
+}
+
+/* Pass 2, record register changes in each basic block */
+
+/* For each opcode that we care about, indicate how it uses its operands. Most
+ * opcodes can be handled generically because they completely specify their
+ * operands in the instruction, however many opcodes have side effects such as
+ * reading or writing rax or updating rsp. Instructions that change registers
+ * that are not listed in the operands must be handled as special cases. In
+ * addition, instructions that copy registers while preserving their contents
+ * (push, pop, mov) or change the contents in a well defined way (add with an
+ * immediate, lea) must be handled as special cases in order to track the
+ * register contents.
+ *
+ * The tables below only list opcodes that are actually used in the Linux
+ * kernel, so they omit most of the floating point and all of the SSE type
+ * instructions. The operand usage entries only cater for accesses to memory
+ * and to the integer registers, accesses to floating point registers and flags
+ * are not relevant for kernel backtraces.
+ */
+
+enum bb_operand_usage {
+ BBOU_UNKNOWN = 0,
+	/* Generic entries. Because xchg can do any combination of
+	 * read src, write src, read dst and write dst, we need to
+	 * define all 16 possibilities. These are ordered by rs = 1,
+	 * rd = 2, ws = 4, wd = 8; the bb_usage_x*() functions rely on this
+ * order.
+ */
+ BBOU_RS = 1, /* read src */ /* 1 */
+ BBOU_RD, /* read dst */ /* 2 */
+ BBOU_RSRD, /* 3 */
+ BBOU_WS, /* write src */ /* 4 */
+ BBOU_RSWS, /* 5 */
+ BBOU_RDWS, /* 6 */
+ BBOU_RSRDWS, /* 7 */
+ BBOU_WD, /* write dst */ /* 8 */
+ BBOU_RSWD, /* 9 */
+ BBOU_RDWD, /* 10 */
+ BBOU_RSRDWD, /* 11 */
+ BBOU_WSWD, /* 12 */
+ BBOU_RSWSWD, /* 13 */
+ BBOU_RDWSWD, /* 14 */
+ BBOU_RSRDWSWD, /* 15 */
+ /* opcode specific entries */
+ BBOU_ADD,
+ BBOU_AND,
+ BBOU_CALL,
+ BBOU_CBW,
+ BBOU_CMOV,
+ BBOU_CMPXCHG,
+ BBOU_CMPXCHGD,
+ BBOU_CPUID,
+ BBOU_CWD,
+ BBOU_DIV,
+ BBOU_IDIV,
+ BBOU_IMUL,
+ BBOU_IRET,
+ BBOU_JMP,
+ BBOU_LAHF,
+ BBOU_LEA,
+ BBOU_LEAVE,
+ BBOU_LODS,
+ BBOU_LOOP,
+ BBOU_LSS,
+ BBOU_MONITOR,
+ BBOU_MOV,
+ BBOU_MOVS,
+ BBOU_MUL,
+ BBOU_MWAIT,
+ BBOU_NOP,
+ BBOU_OUTS,
+ BBOU_POP,
+ BBOU_POPF,
+ BBOU_PUSH,
+ BBOU_PUSHF,
+ BBOU_RDMSR,
+ BBOU_RDTSC,
+ BBOU_RET,
+ BBOU_SAHF,
+ BBOU_SCAS,
+ BBOU_SUB,
+ BBOU_SYSEXIT,
+ BBOU_SYSRET,
+ BBOU_WRMSR,
+ BBOU_XADD,
+ BBOU_XCHG,
+ BBOU_XOR,
+};
+
+struct bb_opcode_usage {
+ int length;
+ enum bb_operand_usage usage;
+ const char *opcode;
+};
+
+/* This table is sorted in alphabetical order of opcode, except that the
+ * trailing '"' is treated as a high value. For example, 'in' sorts after
+ * 'inc', 'bt' after 'btc'. This modified sort order ensures that shorter
+ * opcodes come after long ones. A normal sort would put 'in' first, so 'in'
+ * would match both 'inc' and 'in'. When adding any new entries to this table,
+ * be careful to put shorter entries last in their group.
+ *
+ * To automatically sort the table (in vi)
+ * Mark the first and last opcode line with 'a and 'b
+ * 'a
+ * !'bsed -e 's/"}/}}/' | LANG=C sort -t '"' -k2 | sed -e 's/}}/"}/'
+ *
+ * If a new instruction has to be added, first consider if it affects registers
+ * other than those listed in the operands. Also consider if you want to track
+ * the results of issuing the instruction, IOW can you extract useful
+ * information by looking in detail at the modified registers or memory. If
+ * either test is true then you need a special case to handle the instruction.
+ *
+ * The generic entries at the start of enum bb_operand_usage all have one thing
+ * in common, if a register or memory location is updated then that location
+ * becomes undefined, i.e. we lose track of anything that was previously saved
+ * in that location. So only use a generic BBOU_* value when the result of the
+ * instruction cannot be calculated exactly _and_ when all the affected
+ * registers are listed in the operands.
+ *
+ * Examples:
+ *
+ * 'call' does not generate a known result, but as a side effect of call,
+ * several scratch registers become undefined, so it needs a special BBOU_CALL
+ * entry.
+ *
+ * 'adc' generates a variable result, it depends on the carry flag, so 'adc'
+ * gets a generic entry. 'add' can generate an exact result (add with
+ * immediate on a register that points to the stack) or it can generate an
+ * unknown result (add a variable, or add immediate to a register that does not
+ * contain a stack pointer) so 'add' has its own BBOU_ADD entry.
+ */
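+/* Lookup example: "movl" is compared against the 'm' entries below in
+ * table order (mfence, monitor, movs, mov, ...). strncmp() with length 4
+ * rejects "movs", then length 3 matches "mov", giving BBOU_MOV; "movsl"
+ * would already have matched "movs". This is why "movs" must precede
+ * "mov".
+ */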
+
+static const struct bb_opcode_usage
+bb_opcode_usage_all[] = {
+ {3, BBOU_RSRDWD, "adc"},
+ {3, BBOU_ADD, "add"},
+ {3, BBOU_AND, "and"},
+ {3, BBOU_RSWD, "bsf"},
+ {3, BBOU_RSWD, "bsr"},
+ {5, BBOU_RSWS, "bswap"},
+ {3, BBOU_RSRDWD, "btc"},
+ {3, BBOU_RSRDWD, "btr"},
+ {3, BBOU_RSRDWD, "bts"},
+ {2, BBOU_RSRD, "bt"},
+ {4, BBOU_CALL, "call"},
+ {4, BBOU_CBW, "cbtw"}, /* Intel cbw */
+ {3, BBOU_NOP, "clc"},
+ {3, BBOU_NOP, "cld"},
+ {7, BBOU_RS, "clflush"},
+ {4, BBOU_NOP, "clgi"},
+ {3, BBOU_NOP, "cli"},
+ {4, BBOU_CWD, "cltd"}, /* Intel cdq */
+ {4, BBOU_CBW, "cltq"}, /* Intel cdqe */
+ {4, BBOU_NOP, "clts"},
+ {4, BBOU_CMOV, "cmov"},
+	{9, BBOU_CMPXCHGD, "cmpxchg16"},
+	{8, BBOU_CMPXCHGD, "cmpxchg8"},
+ {7, BBOU_CMPXCHG, "cmpxchg"},
+ {3, BBOU_RSRD, "cmp"},
+ {5, BBOU_CPUID, "cpuid"},
+ {4, BBOU_CWD, "cqto"}, /* Intel cdo */
+ {4, BBOU_CWD, "cwtd"}, /* Intel cwd */
+ {4, BBOU_CBW, "cwtl"}, /* Intel cwde */
+ {4, BBOU_NOP, "data"}, /* alternative ASM_NOP<n> generates data16 on x86_64 */
+ {3, BBOU_RSWS, "dec"},
+ {3, BBOU_DIV, "div"},
+ {5, BBOU_RS, "fdivl"},
+ {5, BBOU_NOP, "finit"},
+ {6, BBOU_RS, "fistpl"},
+ {4, BBOU_RS, "fldl"},
+ {4, BBOU_RS, "fmul"},
+ {6, BBOU_NOP, "fnclex"},
+ {6, BBOU_NOP, "fninit"},
+ {6, BBOU_RS, "fnsave"},
+ {7, BBOU_NOP, "fnsetpm"},
+ {6, BBOU_RS, "frstor"},
+ {5, BBOU_WS, "fstsw"},
+ {5, BBOU_RS, "fsubp"},
+ {5, BBOU_NOP, "fwait"},
+ {7, BBOU_RS, "fxrstor"},
+ {6, BBOU_RS, "fxsave"},
+ {3, BBOU_NOP, "hlt"},
+ {4, BBOU_IDIV, "idiv"},
+ {4, BBOU_IMUL, "imul"},
+ {3, BBOU_RSWS, "inc"},
+ {3, BBOU_NOP, "int"},
+ {7, BBOU_RSRD, "invlpga"},
+ {6, BBOU_RS, "invlpg"},
+ {2, BBOU_RSWD, "in"},
+ {4, BBOU_IRET, "iret"},
+ {1, BBOU_JMP, "j"},
+ {4, BBOU_LAHF, "lahf"},
+ {3, BBOU_RSWD, "lar"},
+ {5, BBOU_RS, "lcall"},
+ {5, BBOU_LEAVE, "leave"},
+ {3, BBOU_LEA, "lea"},
+ {6, BBOU_NOP, "lfence"},
+ {4, BBOU_RS, "lgdt"},
+ {4, BBOU_RS, "lidt"},
+ {4, BBOU_RS, "ljmp"},
+ {4, BBOU_RS, "lldt"},
+ {4, BBOU_RS, "lmsw"},
+ {4, BBOU_LODS, "lods"},
+ {4, BBOU_LOOP, "loop"},
+ {4, BBOU_NOP, "lret"},
+ {3, BBOU_RSWD, "lsl"},
+ {3, BBOU_LSS, "lss"},
+ {3, BBOU_RS, "ltr"},
+ {6, BBOU_NOP, "mfence"},
+ {7, BBOU_MONITOR, "monitor"},
+ {4, BBOU_MOVS, "movs"},
+ {3, BBOU_MOV, "mov"},
+ {3, BBOU_MUL, "mul"},
+ {5, BBOU_MWAIT, "mwait"},
+ {3, BBOU_RSWS, "neg"},
+ {3, BBOU_NOP, "nop"},
+ {3, BBOU_RSWS, "not"},
+ {2, BBOU_RSRDWD, "or"},
+ {4, BBOU_OUTS, "outs"},
+ {3, BBOU_RSRD, "out"},
+ {5, BBOU_NOP, "pause"},
+ {4, BBOU_POPF, "popf"},
+ {3, BBOU_POP, "pop"},
+ {8, BBOU_RS, "prefetch"},
+ {5, BBOU_PUSHF, "pushf"},
+ {4, BBOU_PUSH, "push"},
+ {3, BBOU_RSRDWD, "rcl"},
+ {3, BBOU_RSRDWD, "rcr"},
+ {5, BBOU_RDMSR, "rdmsr"},
+ {5, BBOU_RDMSR, "rdpmc"}, /* same side effects as rdmsr */
+ {5, BBOU_RDTSC, "rdtsc"},
+ {3, BBOU_RET, "ret"},
+ {3, BBOU_RSRDWD, "rol"},
+ {3, BBOU_RSRDWD, "ror"},
+ {4, BBOU_SAHF, "sahf"},
+ {3, BBOU_RSRDWD, "sar"},
+ {3, BBOU_RSRDWD, "sbb"},
+ {4, BBOU_SCAS, "scas"},
+ {3, BBOU_WS, "set"},
+ {6, BBOU_NOP, "sfence"},
+ {4, BBOU_WS, "sgdt"},
+ {3, BBOU_RSRDWD, "shl"},
+ {3, BBOU_RSRDWD, "shr"},
+ {4, BBOU_WS, "sidt"},
+ {4, BBOU_WS, "sldt"},
+ {3, BBOU_NOP, "stc"},
+ {3, BBOU_NOP, "std"},
+ {4, BBOU_NOP, "stgi"},
+ {3, BBOU_NOP, "sti"},
+ {4, BBOU_SCAS, "stos"},
+ {4, BBOU_WS, "strl"},
+ {3, BBOU_WS, "str"},
+ {3, BBOU_SUB, "sub"},
+ {6, BBOU_NOP, "swapgs"},
+ {7, BBOU_SYSEXIT, "sysexit"},
+ {6, BBOU_SYSRET, "sysret"},
+ {4, BBOU_NOP, "test"},
+ {4, BBOU_NOP, "ud2a"},
+ {7, BBOU_RS, "vmclear"},
+ {8, BBOU_NOP, "vmlaunch"},
+ {6, BBOU_RS, "vmload"},
+ {7, BBOU_RS, "vmptrld"},
+ {6, BBOU_WD, "vmread"}, /* vmread src is an encoding, not a register */
+ {8, BBOU_NOP, "vmresume"},
+ {5, BBOU_RS, "vmrun"},
+ {6, BBOU_RS, "vmsave"},
+ {7, BBOU_WD, "vmwrite"}, /* vmwrite src is an encoding, not a register */
+ {3, BBOU_NOP, "vmxoff"},
+ {6, BBOU_NOP, "wbinvd"},
+ {5, BBOU_WRMSR, "wrmsr"},
+ {4, BBOU_XADD, "xadd"},
+ {4, BBOU_XCHG, "xchg"},
+ {3, BBOU_XOR, "xor"},
+ {4, BBOU_NOP, "xrstor"},
+ {4, BBOU_NOP, "xsave"},
+ {10, BBOU_WS, "xstore-rng"},
+};
+
+/* To speed up searching, index bb_opcode_usage_all by the first letter of each
+ * opcode.
+ */
+static struct {
+ const struct bb_opcode_usage *opcode;
+ int size;
+} bb_opcode_usage[26];
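+/* For example, "xor" is looked up via bucket 'x' - 'a' == 23, which is
+ * expected to cover the six entries from "xadd" to "xstore-rng", so the
+ * scan needs at most six strncmp() calls instead of a walk over the whole
+ * table.
+ */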
+
+struct bb_operand {
+ char *base;
+ char *index;
+ char *segment;
+ long disp;
+ unsigned int scale;
+ enum bb_reg_code base_rc; /* UNDEFINED or RAX through R15 */
+ enum bb_reg_code index_rc; /* UNDEFINED or RAX through R15 */
+ unsigned int present :1;
+ unsigned int disp_present :1;
+ unsigned int indirect :1; /* must be combined with reg or memory */
+ unsigned int immediate :1; /* exactly one of these 3 must be set */
+ unsigned int reg :1;
+ unsigned int memory :1;
+};
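+/* Example decode (hypothetical operand): the AT&T text "0x8(%rbp,%rax,8)"
+ * parses to memory == 1, disp_present == 1, disp == 8, base == "%rbp",
+ * index == "%rax", scale == 8, base_rc == BBRG_RBP, index_rc == BBRG_RAX.
+ * An immediate such as "$0x10" sets only immediate, disp_present and disp.
+ */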
+
+struct bb_decode {
+ char *prefix;
+ char *opcode;
+ const struct bb_opcode_usage *match;
+ struct bb_operand src;
+ struct bb_operand dst;
+ struct bb_operand dst2;
+};
+
+static struct bb_decode bb_decode;
+
+static enum bb_reg_code
+bb_reg_map(const char *reg)
+{
+ int lo, hi, c;
+ const struct bb_reg_code_map *p;
+ lo = 0;
+ hi = ARRAY_SIZE(bb_reg_code_map) - 1;
+ while (lo <= hi) {
+ int mid = (hi + lo) / 2;
+ p = bb_reg_code_map + mid;
+ c = strcmp(p->name, reg+1);
+ if (c == 0)
+ return p->reg;
+ else if (c > 0)
+ hi = mid - 1;
+ else
+ lo = mid + 1;
+ }
+ return BBRG_UNDEFINED;
+}
+
+static void
+bb_parse_operand(char *str, struct bb_operand *operand)
+{
+ char *p = str;
+ int sign = 1;
+ operand->present = 1;
+ /* extract any segment prefix */
+ if (p[0] == '%' && p[1] && p[2] == 's' && p[3] == ':') {
+ operand->memory = 1;
+ operand->segment = p;
+ p[3] = '\0';
+ p += 4;
+ }
+ /* extract displacement, base, index, scale */
+ if (*p == '*') {
+ /* jmp/call *disp(%reg), *%reg or *0xnnn */
+ operand->indirect = 1;
+ ++p;
+ }
+ if (*p == '-') {
+ sign = -1;
+ ++p;
+ }
+ if (*p == '$') {
+ operand->immediate = 1;
+ operand->disp_present = 1;
+ operand->disp = simple_strtoul(p+1, &p, 0);
+ } else if (isdigit(*p)) {
+ operand->memory = 1;
+ operand->disp_present = 1;
+ operand->disp = simple_strtoul(p, &p, 0) * sign;
+ }
+ if (*p == '%') {
+ operand->reg = 1;
+ operand->base = p;
+ } else if (*p == '(') {
+ operand->memory = 1;
+ operand->base = ++p;
+ p += strcspn(p, ",)");
+ if (p == operand->base)
+ operand->base = NULL;
+ if (*p == ',') {
+ *p = '\0';
+ operand->index = ++p;
+ p += strcspn(p, ",)");
+ if (p == operand->index)
+ operand->index = NULL;
+ }
+ if (*p == ',') {
+ *p = '\0';
+ operand->scale = simple_strtoul(p+1, &p, 0);
+ }
+ *p = '\0';
+ } else if (*p) {
+ kdb_printf("%s: unexpected token '%c' after disp '%s'\n",
+ __FUNCTION__, *p, str);
+ bb_giveup = 1;
+ }
+ if ((operand->immediate + operand->reg + operand->memory != 1) ||
+ (operand->indirect && operand->immediate)) {
+ kdb_printf("%s: incorrect decode '%s' N %d I %d R %d M %d\n",
+ __FUNCTION__, str,
+ operand->indirect, operand->immediate, operand->reg,
+ operand->memory);
+ bb_giveup = 1;
+ }
+ if (operand->base)
+ operand->base_rc = bb_reg_map(operand->base);
+ if (operand->index)
+ operand->index_rc = bb_reg_map(operand->index);
+}
+
+static void
+bb_print_operand(const char *type, const struct bb_operand *operand)
+{
+ if (!operand->present)
+ return;
+ kdb_printf(" %s %c%c: ",
+ type,
+ operand->indirect ? 'N' : ' ',
+ operand->immediate ? 'I' :
+ operand->reg ? 'R' :
+ operand->memory ? 'M' :
+ '?'
+ );
+ if (operand->segment)
+ kdb_printf("%s:", operand->segment);
+ if (operand->immediate) {
+ kdb_printf("$0x%lx", operand->disp);
+ } else if (operand->reg) {
+ if (operand->indirect)
+ kdb_printf("*");
+ kdb_printf("%s", operand->base);
+ } else if (operand->memory) {
+ if (operand->indirect && (operand->base || operand->index))
+ kdb_printf("*");
+ if (operand->disp_present) {
+ kdb_printf("0x%lx", operand->disp);
+ }
+ if (operand->base || operand->index || operand->scale) {
+ kdb_printf("(");
+ if (operand->base)
+ kdb_printf("%s", operand->base);
+ if (operand->index || operand->scale)
+ kdb_printf(",");
+ if (operand->index)
+ kdb_printf("%s", operand->index);
+ if (operand->scale)
+ kdb_printf(",%d", operand->scale);
+ kdb_printf(")");
+ }
+ }
+ if (operand->base_rc)
+ kdb_printf(" base_rc %d (%s)",
+ operand->base_rc, bbrg_name[operand->base_rc]);
+ if (operand->index_rc)
+ kdb_printf(" index_rc %d (%s)",
+ operand->index_rc,
+ bbrg_name[operand->index_rc]);
+ kdb_printf("\n");
+}
+
+static void
+bb_print_opcode(void)
+{
+ const struct bb_opcode_usage *o = bb_decode.match;
+ kdb_printf(" ");
+ if (bb_decode.prefix)
+ kdb_printf("%s ", bb_decode.prefix);
+ kdb_printf("opcode '%s' matched by '%s', usage %d\n",
+ bb_decode.opcode, o->opcode, o->usage);
+}
+
+static int
+bb_parse_opcode(void)
+{
+ int c, i;
+ const struct bb_opcode_usage *o;
+ static int bb_parse_opcode_error_limit = 5;
+ c = bb_decode.opcode[0] - 'a';
+ if (c < 0 || c >= ARRAY_SIZE(bb_opcode_usage))
+ goto nomatch;
+ o = bb_opcode_usage[c].opcode;
+ if (!o)
+ goto nomatch;
+ for (i = 0; i < bb_opcode_usage[c].size; ++i, ++o) {
+ if (strncmp(bb_decode.opcode, o->opcode, o->length) == 0) {
+ bb_decode.match = o;
+ if (KDB_DEBUG(BB))
+ bb_print_opcode();
+ return 0;
+ }
+ }
+nomatch:
+ if (!bb_parse_opcode_error_limit)
+ return 1;
+ --bb_parse_opcode_error_limit;
+ kdb_printf("%s: no match at [%s]%s " kdb_bfd_vma_fmt0 " - '%s'\n",
+ __FUNCTION__,
+ bb_mod_name, bb_func_name, bb_curr_addr,
+ bb_decode.opcode);
+ return 1;
+}
+
+static bool
+bb_is_int_reg(enum bb_reg_code reg)
+{
+ return reg >= BBRG_RAX && reg < (BBRG_RAX + KDB_INT_REGISTERS);
+}
+
+static bool
+bb_is_simple_memory(const struct bb_operand *operand)
+{
+ return operand->memory &&
+ bb_is_int_reg(operand->base_rc) &&
+ !operand->index_rc &&
+ operand->scale == 0 &&
+ !operand->segment;
+}
+
+static bool
+bb_is_static_disp(const struct bb_operand *operand)
+{
+ return operand->memory &&
+ !operand->base_rc &&
+ !operand->index_rc &&
+ operand->scale == 0 &&
+ !operand->segment &&
+ !operand->indirect;
+}
+
+static enum bb_reg_code
+bb_reg_code_value(enum bb_reg_code reg)
+{
+ BB_CHECK(!bb_is_int_reg(reg), reg, 0);
+ return bb_reg_state->contains[reg - BBRG_RAX].value;
+}
+
+static short
+bb_reg_code_offset(enum bb_reg_code reg)
+{
+ BB_CHECK(!bb_is_int_reg(reg), reg, 0);
+ return bb_reg_state->contains[reg - BBRG_RAX].offset;
+}
+
+static void
+bb_reg_code_set_value(enum bb_reg_code dst, enum bb_reg_code src)
+{
+ BB_CHECK(!bb_is_int_reg(dst), dst, );
+ bb_reg_state->contains[dst - BBRG_RAX].value = src;
+}
+
+static void
+bb_reg_code_set_offset(enum bb_reg_code dst, short offset)
+{
+ BB_CHECK(!bb_is_int_reg(dst), dst, );
+ bb_reg_state->contains[dst - BBRG_RAX].offset = offset;
+}
+
+static bool
+bb_is_osp_defined(enum bb_reg_code reg)
+{
+ if (bb_is_int_reg(reg))
+ return bb_reg_code_value(reg) == BBRG_OSP;
+ else
+ return 0;
+}
+
+static bfd_vma
+bb_actual_value(enum bb_reg_code reg)
+{
+ BB_CHECK(!bb_is_int_reg(reg), reg, 0);
+ return bb_actual[reg - BBRG_RAX].value;
+}
+
+static int
+bb_actual_valid(enum bb_reg_code reg)
+{
+ BB_CHECK(!bb_is_int_reg(reg), reg, 0);
+ return bb_actual[reg - BBRG_RAX].valid;
+}
+
+static void
+bb_actual_set_value(enum bb_reg_code reg, bfd_vma value)
+{
+ BB_CHECK(!bb_is_int_reg(reg), reg, );
+ bb_actual[reg - BBRG_RAX].value = value;
+}
+
+static void
+bb_actual_set_valid(enum bb_reg_code reg, int valid)
+{
+ BB_CHECK(!bb_is_int_reg(reg), reg, );
+ bb_actual[reg - BBRG_RAX].valid = valid;
+}
+
+/* The scheduler code switches RSP then does PUSH; it is not an error for RSP
+ * to be undefined in this area of the code.
+ */
+static bool
+bb_is_scheduler_address(void)
+{
+ return bb_curr_addr >= bb__sched_text_start &&
+ bb_curr_addr < bb__sched_text_end;
+}
+
+static void
+bb_reg_read(enum bb_reg_code reg)
+{
+ int i, r = 0;
+ if (!bb_is_int_reg(reg) ||
+ bb_reg_code_value(reg) != reg)
+ return;
+ for (i = 0;
+ i < min_t(unsigned int, REGPARM, ARRAY_SIZE(bb_param_reg));
+ ++i) {
+ if (reg == bb_param_reg[i]) {
+ r = i + 1;
+ break;
+ }
+ }
+ bb_reg_params = max(bb_reg_params, r);
+}
+
+static void
+bb_do_reg_state_print(const struct bb_reg_state *s)
+{
+ int i, offset_address, offset_value;
+ const struct bb_memory_contains *c;
+ enum bb_reg_code value;
+ kdb_printf(" bb_reg_state %p\n", s);
+ for (i = 0; i < ARRAY_SIZE(s->contains); ++i) {
+ value = s->contains[i].value;
+ offset_value = s->contains[i].offset;
+ kdb_printf(" %s = %s",
+ bbrg_name[i + BBRG_RAX], bbrg_name[value]);
+ if (value == BBRG_OSP)
+ KDB_DEBUG_BB_OFFSET_PRINTF(offset_value, "", "");
+ kdb_printf("\n");
+ }
+ for (i = 0, c = s->memory; i < s->mem_count; ++i, ++c) {
+ offset_address = c->offset_address;
+ value = c->value;
+ offset_value = c->offset_value;
+ kdb_printf(" slot %d offset_address %c0x%x %s",
+ i,
+ offset_address >= 0 ? '+' : '-',
+ offset_address >= 0 ? offset_address : -offset_address,
+ bbrg_name[value]);
+ if (value == BBRG_OSP)
+ KDB_DEBUG_BB_OFFSET_PRINTF(offset_value, "", "");
+ kdb_printf("\n");
+ }
+}
+
+static void
+bb_reg_state_print(const struct bb_reg_state *s)
+{
+ if (KDB_DEBUG(BB))
+ bb_do_reg_state_print(s);
+}
+
+/* Set register 'dst' to contain the value from 'src'. This includes reading
+ * from 'src' and writing to 'dst'. The offset value is copied iff 'src'
+ * contains a stack pointer.
+ *
+ * Be very careful about the context here. 'dst' and 'src' reflect integer
+ * registers by name, _not_ by the value of their contents. "mov %rax,%rsi"
+ * will call this function as bb_reg_set_reg(BBRG_RSI, BBRG_RAX), which
+ * reflects what the assembler code is doing. However we need to track the
+ * _values_ in the registers, not their names. IOW, we really care about "what
+ * value does rax contain when it is copied into rsi?", so we can record the
+ * fact that we now have two copies of that value, one in rax and one in rsi.
+ */
+
+static void
+bb_reg_set_reg(enum bb_reg_code dst, enum bb_reg_code src)
+{
+ enum bb_reg_code src_value = BBRG_UNDEFINED;
+ short offset_value = 0;
+ KDB_DEBUG_BB(" %s = %s", bbrg_name[dst], bbrg_name[src]);
+ if (bb_is_int_reg(src)) {
+ bb_reg_read(src);
+ src_value = bb_reg_code_value(src);
+ KDB_DEBUG_BB(" (%s", bbrg_name[src_value]);
+ if (bb_is_osp_defined(src)) {
+ offset_value = bb_reg_code_offset(src);
+ KDB_DEBUG_BB_OFFSET(offset_value, "", "");
+ }
+ KDB_DEBUG_BB(")");
+ }
+ if (bb_is_int_reg(dst)) {
+ bb_reg_code_set_value(dst, src_value);
+ bb_reg_code_set_offset(dst, offset_value);
+ }
+ KDB_DEBUG_BB("\n");
+}
+
+static void
+bb_reg_set_undef(enum bb_reg_code dst)
+{
+ bb_reg_set_reg(dst, BBRG_UNDEFINED);
+}
+
+/* Delete any record of a stored register held in osp + 'offset' */
+
+static void
+bb_delete_memory(short offset)
+{
+ int i;
+ struct bb_memory_contains *c;
+ for (i = 0, c = bb_reg_state->memory;
+ i < bb_reg_state->mem_count;
+ ++i, ++c) {
+ if (c->offset_address == offset &&
+ c->value != BBRG_UNDEFINED) {
+ KDB_DEBUG_BB(" delete %s from ",
+ bbrg_name[c->value]);
+ KDB_DEBUG_BB_OFFSET(offset, "osp", "");
+ KDB_DEBUG_BB(" slot %d\n",
+ (int)(c - bb_reg_state->memory));
+ memset(c, BBRG_UNDEFINED, sizeof(*c));
+ if (i == bb_reg_state->mem_count - 1)
+ --bb_reg_state->mem_count;
+ }
+ }
+}
+
+/* Set memory location *('dst' + 'offset_address') to contain the supplied
+ * value and offset. 'dst' is assumed to be a register that contains a stack
+ * pointer.
+ */
+
+static void
+bb_memory_set_reg_value(enum bb_reg_code dst, short offset_address,
+ enum bb_reg_code value, short offset_value)
+{
+ int i;
+ struct bb_memory_contains *c, *free = NULL;
+ BB_CHECK(!bb_is_osp_defined(dst), dst, );
+ KDB_DEBUG_BB(" *(%s", bbrg_name[dst]);
+ KDB_DEBUG_BB_OFFSET(offset_address, "", "");
+ offset_address += bb_reg_code_offset(dst);
+ KDB_DEBUG_BB_OFFSET(offset_address, " osp", ") = ");
+ KDB_DEBUG_BB("%s", bbrg_name[value]);
+ if (value == BBRG_OSP)
+ KDB_DEBUG_BB_OFFSET(offset_value, "", "");
+ for (i = 0, c = bb_reg_state->memory;
+ i < bb_reg_state_max;
+ ++i, ++c) {
+ if (c->offset_address == offset_address)
+ free = c;
+ else if (c->value == BBRG_UNDEFINED && !free)
+ free = c;
+ }
+ if (!free) {
+ struct bb_reg_state *new, *old = bb_reg_state;
+ size_t old_size, new_size;
+ int slot;
+ old_size = sizeof(*old) + bb_reg_state_max *
+ sizeof(old->memory[0]);
+ slot = bb_reg_state_max;
+ bb_reg_state_max += 5;
+ new_size = sizeof(*new) + bb_reg_state_max *
+ sizeof(new->memory[0]);
+ new = debug_kmalloc(new_size, GFP_ATOMIC);
+ if (!new) {
+ kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
+ bb_giveup = 1;
+ } else {
+ memcpy(new, old, old_size);
+ memset((char *)new + old_size, BBRG_UNDEFINED,
+ new_size - old_size);
+ bb_reg_state = new;
+ debug_kfree(old);
+ free = bb_reg_state->memory + slot;
+ }
+ }
+ if (free) {
+ int slot = free - bb_reg_state->memory;
+ free->offset_address = offset_address;
+ free->value = value;
+ free->offset_value = offset_value;
+ KDB_DEBUG_BB(" slot %d", slot);
+ bb_reg_state->mem_count = max(bb_reg_state->mem_count, slot+1);
+ }
+ KDB_DEBUG_BB("\n");
+}
+
+/* Set memory location *('dst' + 'offset') to contain the value from register
+ * 'src'. 'dst' is assumed to be a register that contains a stack pointer.
+ * This differs from bb_memory_set_reg_value because it takes a src register
+ * which contains a value and possibly an offset, bb_memory_set_reg_value is
+ * passed the value and offset directly.
+ */
+
+static void
+bb_memory_set_reg(enum bb_reg_code dst, enum bb_reg_code src,
+ short offset_address)
+{
+ int offset_value;
+ enum bb_reg_code value;
+ BB_CHECK(!bb_is_osp_defined(dst), dst, );
+ if (!bb_is_int_reg(src))
+ return;
+ value = bb_reg_code_value(src);
+ if (value == BBRG_UNDEFINED) {
+ bb_delete_memory(offset_address + bb_reg_code_offset(dst));
+ return;
+ }
+ offset_value = bb_reg_code_offset(src);
+ bb_reg_read(src);
+ bb_memory_set_reg_value(dst, offset_address, value, offset_value);
+}
+
+/* Set register 'dst' to contain the value from memory *('src' + offset_address).
+ * 'src' is assumed to be a register that contains a stack pointer.
+ */
+
+static void
+bb_reg_set_memory(enum bb_reg_code dst, enum bb_reg_code src, short offset_address)
+{
+ int i, defined = 0;
+ struct bb_memory_contains *s;
+ BB_CHECK(!bb_is_osp_defined(src), src, );
+ KDB_DEBUG_BB(" %s = *(%s",
+ bbrg_name[dst], bbrg_name[src]);
+ KDB_DEBUG_BB_OFFSET(offset_address, "", ")");
+ offset_address += bb_reg_code_offset(src);
+ KDB_DEBUG_BB_OFFSET(offset_address, " (osp", ")");
+ for (i = 0, s = bb_reg_state->memory;
+ i < bb_reg_state->mem_count;
+ ++i, ++s) {
+ if (s->offset_address == offset_address && bb_is_int_reg(dst)) {
+ bb_reg_code_set_value(dst, s->value);
+ KDB_DEBUG_BB(" value %s", bbrg_name[s->value]);
+ if (s->value == BBRG_OSP) {
+ bb_reg_code_set_offset(dst, s->offset_value);
+ KDB_DEBUG_BB_OFFSET(s->offset_value, "", "");
+ } else {
+ bb_reg_code_set_offset(dst, 0);
+ }
+ defined = 1;
+ }
+ }
+ if (!defined)
+ bb_reg_set_reg(dst, BBRG_UNDEFINED);
+ else
+ KDB_DEBUG_BB("\n");
+}
+
+/* A generic read from an operand. */
+
+static void
+bb_read_operand(const struct bb_operand *operand)
+{
+ int m = 0;
+ if (operand->base_rc)
+ bb_reg_read(operand->base_rc);
+ if (operand->index_rc)
+ bb_reg_read(operand->index_rc);
+ if (bb_is_simple_memory(operand) &&
+ bb_is_osp_defined(operand->base_rc) &&
+ bb_decode.match->usage != BBOU_LEA) {
+ m = (bb_reg_code_offset(operand->base_rc) + operand->disp +
+ KDB_WORD_SIZE - 1) / KDB_WORD_SIZE;
+ bb_memory_params = max(bb_memory_params, m);
+ }
+}
+
+/* A generic write to an operand, resulting in an undefined value in that
+ * location. All well defined operands are handled separately, this function
+ * only handles the opcodes where the result is undefined.
+ */
+
+static void
+bb_write_operand(const struct bb_operand *operand)
+{
+ enum bb_reg_code base_rc = operand->base_rc;
+ if (operand->memory) {
+ if (base_rc)
+ bb_reg_read(base_rc);
+ if (operand->index_rc)
+ bb_reg_read(operand->index_rc);
+ } else if (operand->reg && base_rc) {
+ bb_reg_set_undef(base_rc);
+ }
+ if (bb_is_simple_memory(operand) && bb_is_osp_defined(base_rc)) {
+ int offset;
+ offset = bb_reg_code_offset(base_rc) + operand->disp;
+ offset = ALIGN(offset - KDB_WORD_SIZE + 1, KDB_WORD_SIZE);
+ bb_delete_memory(offset);
+ }
+}
+
+/* Adjust a register that contains a stack pointer */
+
+static void
+bb_adjust_osp(enum bb_reg_code reg, int adjust)
+{
+ int offset = bb_reg_code_offset(reg), old_offset = offset;
+ KDB_DEBUG_BB(" %s osp offset ", bbrg_name[reg]);
+ KDB_DEBUG_BB_OFFSET(bb_reg_code_offset(reg), "", " -> ");
+ offset += adjust;
+ bb_reg_code_set_offset(reg, offset);
+ KDB_DEBUG_BB_OFFSET(bb_reg_code_offset(reg), "", "\n");
+ /* When RSP is adjusted upwards, it invalidates any memory
+ * stored between the old and current stack offsets.
+ */
+ if (reg == BBRG_RSP) {
+ while (old_offset < bb_reg_code_offset(reg)) {
+ bb_delete_memory(old_offset);
+ old_offset += KDB_WORD_SIZE;
+ }
+ }
+}
+
+/* The current instruction adjusts a register that contains a stack pointer.
+ * Direction is 1 or -1, depending on whether the instruction is add/lea or
+ * sub.
+ */
+
+static void
+bb_adjust_osp_instruction(int direction)
+{
+ enum bb_reg_code dst_reg = bb_decode.dst.base_rc;
+ if (bb_decode.src.immediate ||
+ bb_decode.match->usage == BBOU_LEA /* lea has its own checks */) {
+ int adjust = direction * bb_decode.src.disp;
+ bb_adjust_osp(dst_reg, adjust);
+ } else {
+ /* variable stack adjustment, osp offset is not well defined */
+ KDB_DEBUG_BB(" %s osp offset ", bbrg_name[dst_reg]);
+ KDB_DEBUG_BB_OFFSET(bb_reg_code_offset(dst_reg), "", " -> undefined\n");
+ bb_reg_code_set_value(dst_reg, BBRG_UNDEFINED);
+ bb_reg_code_set_offset(dst_reg, 0);
+ }
+}
+
+/* Some instructions using memory have an explicit length suffix (b, w, l, q).
+ * The equivalent instructions using a register imply the length from the
+ * register name. Deduce the operand length.
+ */
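+/* For example, "movb" implies 8 from the 'b' suffix; with no suffix, "%al"
+ * and "%ah" give 8, "%ax" gives 16, "%eax" gives 32 and "%rax" gives 64,
+ * deduced purely from the register name.
+ */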
+
+static int
+bb_operand_length(const struct bb_operand *operand, char opcode_suffix)
+{
+ int l = 0;
+ switch (opcode_suffix) {
+ case 'b':
+ l = 8;
+ break;
+ case 'w':
+ l = 16;
+ break;
+ case 'l':
+ l = 32;
+ break;
+ case 'q':
+ l = 64;
+ break;
+ }
+ if (l == 0 && operand->reg) {
+ switch (strlen(operand->base)) {
+ case 3:
+ switch (operand->base[2]) {
+ case 'h':
+ case 'l':
+ l = 8;
+ break;
+ default:
+ l = 16;
+ break;
+			}
+			break;
+		case 4:
+ if (operand->base[1] == 'r')
+ l = 64;
+ else
+ l = 32;
+ break;
+ }
+ }
+ return l;
+}
+
+static int
+bb_reg_state_size(const struct bb_reg_state *state)
+{
+ return sizeof(*state) +
+ state->mem_count * sizeof(state->memory[0]);
+}
+
+/* Canonicalize the current bb_reg_state so it can be compared against
+ * previously created states. Sort the memory entries in descending order of
+ * offset_address (stack grows down). Empty slots are moved to the end of the
+ * list and trimmed.
+ */
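+/* For example, slots recorded in the order osp-0x8, <undefined>, osp-0x10
+ * canonicalize to osp-0x8, osp-0x10 with mem_count trimmed to 2, so two
+ * states that stored the same data in a different order compare equal
+ * byte for byte.
+ */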
+
+static void
+bb_reg_state_canonicalize(void)
+{
+ int i, order, changed;
+ struct bb_memory_contains *p1, *p2, temp;
+ do {
+ changed = 0;
+ for (i = 0, p1 = bb_reg_state->memory;
+ i < bb_reg_state->mem_count-1;
+ ++i, ++p1) {
+ p2 = p1 + 1;
+ if (p2->value == BBRG_UNDEFINED) {
+ order = 0;
+ } else if (p1->value == BBRG_UNDEFINED) {
+ order = 1;
+ } else if (p1->offset_address < p2->offset_address) {
+ order = 1;
+ } else if (p1->offset_address > p2->offset_address) {
+ order = -1;
+ } else {
+ order = 0;
+ }
+ if (order > 0) {
+ temp = *p2;
+ *p2 = *p1;
+ *p1 = temp;
+ changed = 1;
+ }
+ }
+	} while (changed);
+ for (i = 0, p1 = bb_reg_state->memory;
+ i < bb_reg_state_max;
+ ++i, ++p1) {
+ if (p1->value != BBRG_UNDEFINED)
+ bb_reg_state->mem_count = i + 1;
+ }
+ bb_reg_state_print(bb_reg_state);
+}
+
+static int
+bb_special_case(bfd_vma to)
+{
+ int i, j, rsp_offset, expect_offset, offset, errors = 0, max_errors = 40;
+ enum bb_reg_code reg, expect_value, value;
+ struct bb_name_state *r;
+
+ for (i = 0, r = bb_special_cases;
+ i < ARRAY_SIZE(bb_special_cases);
+ ++i, ++r) {
+ if (to == r->address &&
+ (r->fname == NULL || strcmp(bb_func_name, r->fname) == 0))
+ goto match;
+ }
+ /* Some inline assembler code has jumps to .fixup sections which result
+ * in out of line transfers with undefined state, ignore them.
+ */
+ if (strcmp(bb_func_name, "strnlen_user") == 0 ||
+ strcmp(bb_func_name, "copy_from_user") == 0)
+ return 1;
+ return 0;
+
+match:
+ /* Check the running registers match */
+ for (reg = BBRG_RAX; reg < r->regs_size; ++reg) {
+ expect_value = r->regs[reg].value;
+ if (test_bit(expect_value, r->skip_regs.bits)) {
+ /* this regs entry is not defined for this label */
+ continue;
+ }
+ if (expect_value == BBRG_UNDEFINED)
+ continue;
+ expect_offset = r->regs[reg].offset;
+ value = bb_reg_code_value(reg);
+ offset = bb_reg_code_offset(reg);
+ if (expect_value == value &&
+ (value != BBRG_OSP || r->osp_offset == offset))
+ continue;
+ kdb_printf("%s: Expected %s to contain %s",
+ __FUNCTION__,
+ bbrg_name[reg],
+ bbrg_name[expect_value]);
+ if (r->osp_offset)
+ KDB_DEBUG_BB_OFFSET_PRINTF(r->osp_offset, "", "");
+ kdb_printf(". It actually contains %s", bbrg_name[value]);
+ if (offset)
+ KDB_DEBUG_BB_OFFSET_PRINTF(offset, "", "");
+ kdb_printf("\n");
+ ++errors;
+ if (max_errors-- == 0)
+ goto fail;
+ }
+ /* Check that any memory data on stack matches */
+ i = j = 0;
+ while (i < bb_reg_state->mem_count &&
+ j < r->mem_size) {
+ expect_value = r->mem[j].value;
+ if (test_bit(expect_value, r->skip_mem.bits) ||
+ expect_value == BBRG_UNDEFINED) {
+ /* this memory slot is not defined for this label */
+ ++j;
+ continue;
+ }
+ rsp_offset = bb_reg_state->memory[i].offset_address -
+ bb_reg_code_offset(BBRG_RSP);
+ if (rsp_offset >
+ r->mem[j].offset_address) {
+ /* extra slots in memory are OK */
+ ++i;
+ } else if (rsp_offset <
+ r->mem[j].offset_address) {
+ /* Required memory slot is missing */
+ kdb_printf("%s: Invalid bb_reg_state.memory, "
+ "missing memory entry[%d] %s\n",
+ __FUNCTION__, j, bbrg_name[expect_value]);
+ ++errors;
+ if (max_errors-- == 0)
+ goto fail;
+ ++j;
+ } else {
+ if (bb_reg_state->memory[i].offset_value ||
+ bb_reg_state->memory[i].value != expect_value) {
+ /* memory slot is present but contains wrong
+ * value.
+ */
+ kdb_printf("%s: Invalid bb_reg_state.memory, "
+ "wrong value in slot %d, "
+ "should be %s, it is %s\n",
+ __FUNCTION__, i,
+ bbrg_name[expect_value],
+ bbrg_name[bb_reg_state->memory[i].value]);
+ ++errors;
+ if (max_errors-- == 0)
+ goto fail;
+ }
+ ++i;
+ ++j;
+ }
+ }
+ while (j < r->mem_size) {
+ expect_value = r->mem[j].value;
+ if (test_bit(expect_value, r->skip_mem.bits) ||
+ expect_value == BBRG_UNDEFINED)
+ ++j;
+ else
+ break;
+ }
+ if (j != r->mem_size) {
+ /* Hit end of memory before testing all the pt_reg slots */
+ kdb_printf("%s: Invalid bb_reg_state.memory, "
+ "missing trailing entries\n",
+ __FUNCTION__);
+ ++errors;
+ if (max_errors-- == 0)
+ goto fail;
+ }
+ if (errors)
+ goto fail;
+ return 1;
+fail:
+ kdb_printf("%s: on transfer to %s\n", __FUNCTION__, r->name);
+ bb_giveup = 1;
+ return 1;
+}
+
+/* Transfer of control to a label outside the current function. If the
+ * transfer is to a known common code path then do a sanity check on the state
+ * at this point.
+ */
+
+static void
+bb_sanity_check(int type)
+{
+ enum bb_reg_code expect, actual;
+ int i, offset, error = 0;
+
+ for (i = 0; i < ARRAY_SIZE(bb_preserved_reg); ++i) {
+ expect = bb_preserved_reg[i];
+ actual = bb_reg_code_value(expect);
+ offset = bb_reg_code_offset(expect);
+ if (expect == actual)
+ continue;
+ /* type == 1 is sysret/sysexit, ignore RSP */
+ if (type && expect == BBRG_RSP)
+ continue;
+ /* type == 1 is sysret/sysexit, ignore RBP for i386 */
+ /* We used to have "#ifndef CONFIG_X86_64" for the type=1 RBP
+ * test; however, x86_64 can run ia32 compatible mode and
+ * hit this problem. Perform the following test anyway!
+ */
+ if (type && expect == BBRG_RBP)
+ continue;
+ /* RSP should contain OSP+0. Except for ptregscall_common and
+ * ia32_ptregs_common, they get a partial pt_regs, fudge the
+ * stack to make it a full pt_regs then reverse the effect on
+ * exit, so the offset is -0x50 on exit.
+ */
+ if (expect == BBRG_RSP &&
+ bb_is_osp_defined(expect) &&
+ (offset == 0 ||
+ (offset == -0x50 &&
+ (strcmp(bb_func_name, "ptregscall_common") == 0 ||
+ strcmp(bb_func_name, "ia32_ptregs_common") == 0))))
+ continue;
+ /* The put_user and save_paranoid functions are special.
+ * %rbx gets clobbered. */
+ if (expect == BBRG_RBX &&
+ (strncmp(bb_func_name, "__put_user_", 11) == 0 ||
+ strcmp(bb_func_name, "save_paranoid") == 0))
+ continue;
+ /* Ignore rbx and rsp for error_entry */
+ if ((strcmp(bb_func_name, "error_entry") == 0) &&
+ (expect == BBRG_RBX ||
+ (expect == BBRG_RSP && bb_is_osp_defined(expect) && offset == -0x10)))
+ continue;
+ kdb_printf("%s: Expected %s, got %s",
+ __FUNCTION__,
+ bbrg_name[expect], bbrg_name[actual]);
+ if (offset)
+ KDB_DEBUG_BB_OFFSET_PRINTF(offset, "", "");
+ kdb_printf("\n");
+ error = 1;
+ }
+ BB_CHECK(error, error, );
+}
+
+/* Transfer of control. Follow the arc and save the current state as input to
+ * another basic block.
+ */
+
+static void
+bb_transfer(bfd_vma from, bfd_vma to, unsigned int drop_through)
+{
+ int i, found;
+ size_t size;
+ struct bb *bb = NULL; /* silence gcc "may be used uninitialized" */
+ struct bb_jmp *bb_jmp;
+ struct bb_reg_state *state;
+ bb_reg_state_canonicalize();
+ found = 0;
+ for (i = 0; i < bb_jmp_count; ++i) {
+ bb_jmp = bb_jmp_list + i;
+ if (bb_jmp->from == from &&
+ bb_jmp->to == to &&
+ bb_jmp->drop_through == drop_through) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ /* Transfer outside the current function. Check the special
+ * cases (mainly in entry.S) first. If it is not a known
+ * special case then check if the target address is the start
+ * of a function or not. If it is the start of a function then
+ * assume tail recursion and require that the state be the same
+ * as on entry. Otherwise assume out of line code (e.g.
+ * spinlock contention path) and ignore it, the state can be
+ * anything.
+ */
+ kdb_symtab_t symtab;
+ if (bb_special_case(to))
+ return;
+ kdbnearsym(to, &symtab);
+ if (symtab.sym_start != to)
+ return;
+ bb_sanity_check(0);
+ if (bb_giveup)
+ return;
+#ifdef NO_SIBLINGS
+ /* Only print this message when the kernel is compiled with
+ * -fno-optimize-sibling-calls. Otherwise it would print a
+ * message for every tail recursion call. If you see the
+ * message below then you probably have an assembler label that
+ * is not listed in the special cases.
+ */
+ kdb_printf(" not matched: from "
+ kdb_bfd_vma_fmt0
+ " to " kdb_bfd_vma_fmt0
+ " drop_through %d bb_jmp[%d]\n",
+ from, to, drop_through, i);
+#endif /* NO_SIBLINGS */
+ return;
+ }
+ KDB_DEBUG_BB(" matched: from " kdb_bfd_vma_fmt0
+ " to " kdb_bfd_vma_fmt0
+ " drop_through %d bb_jmp[%d]\n",
+ from, to, drop_through, i);
+ found = 0;
+ for (i = 0; i < bb_count; ++i) {
+ bb = bb_list[i];
+ if (bb->start == to) {
+ found = 1;
+ break;
+ }
+ }
+ BB_CHECK(!found, to, );
+ /* If the register state for this arc has already been set (we are
+ * rescanning the block that originates the arc) and the state is the
+ * same as the previous state for this arc then this input to the
+ * target block is the same as last time, so there is no need to rescan
+ * the target block.
+ */
+ state = bb_jmp->state;
+ size = bb_reg_state_size(bb_reg_state);
+ if (state) {
+ bb_reg_state->ref_count = state->ref_count;
+ if (memcmp(state, bb_reg_state, size) == 0) {
+ KDB_DEBUG_BB(" no state change\n");
+ return;
+ }
+ if (--state->ref_count == 0)
+ debug_kfree(state);
+ bb_jmp->state = NULL;
+ }
+ /* New input state is required. To save space, check if any other arcs
+ * have the same state and reuse them where possible. The overall set
+ * of inputs to the target block is now different so the target block
+ * must be rescanned.
+ */
+ bb->changed = 1;
+ for (i = 0; i < bb_jmp_count; ++i) {
+ state = bb_jmp_list[i].state;
+ if (!state)
+ continue;
+ bb_reg_state->ref_count = state->ref_count;
+ if (memcmp(state, bb_reg_state, size) == 0) {
+ KDB_DEBUG_BB(" reuse bb_jmp[%d]\n", i);
+ bb_jmp->state = state;
+ ++state->ref_count;
+ return;
+ }
+ }
+ state = debug_kmalloc(size, GFP_ATOMIC);
+ if (!state) {
+ kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
+ bb_giveup = 1;
+ return;
+ }
+ memcpy(state, bb_reg_state, size);
+ state->ref_count = 1;
+ bb_jmp->state = state;
+ KDB_DEBUG_BB(" new state %p\n", state);
+}
+
+/* Isolate the processing for 'mov' so it can be used for 'xadd'/'xchg' as
+ * well.
+ *
+ * xadd/xchg expect this function to return BBOU_NOP for special cases,
+ * otherwise it returns BBOU_RSWD. All special cases must be handled entirely
+ * within this function, including doing bb_read_operand or bb_write_operand
+ * where necessary.
+ */
+
+static enum bb_operand_usage
+bb_usage_mov(const struct bb_operand *src, const struct bb_operand *dst, int l)
+{
+ int full_register_src, full_register_dst;
+ full_register_src = bb_operand_length(src, bb_decode.opcode[l])
+ == KDB_WORD_SIZE * 8;
+ full_register_dst = bb_operand_length(dst, bb_decode.opcode[l])
+ == KDB_WORD_SIZE * 8;
+ /* If both src and dst are full integer registers then record the
+ * register change.
+ */
+ if (src->reg &&
+ bb_is_int_reg(src->base_rc) &&
+ dst->reg &&
+ bb_is_int_reg(dst->base_rc) &&
+ full_register_src &&
+ full_register_dst) {
+ /* Special case for the code that switches stacks in
+ * jprobe_return. That code must modify RSP but it does it in
+ * a well defined manner. Do not invalidate RSP.
+ */
+ if (src->base_rc == BBRG_RBX &&
+ dst->base_rc == BBRG_RSP &&
+ strcmp(bb_func_name, "jprobe_return") == 0) {
+ bb_read_operand(src);
+ return BBOU_NOP;
+ }
+ /* math_abort takes the equivalent of a longjmp structure and
+ * resets the stack. Ignore this, it leaves RSP well defined.
+ */
+ if (dst->base_rc == BBRG_RSP &&
+ strcmp(bb_func_name, "math_abort") == 0) {
+ bb_read_operand(src);
+ return BBOU_NOP;
+ }
+ bb_reg_set_reg(dst->base_rc, src->base_rc);
+ return BBOU_NOP;
+ }
+ /* If the move is from a full integer register to stack then record it.
+ */
+ if (src->reg &&
+ bb_is_simple_memory(dst) &&
+ bb_is_osp_defined(dst->base_rc) &&
+ full_register_src) {
+ /* Ugly special case. Initializing list heads on stack causes
+ * false references to stack variables when the list head is
+ * used. Static code analysis cannot detect that the list head
+ * has been changed by a previous execution loop and that a
+ * basic block is only executed after the list head has been
+ * changed.
+ *
+ * These false references can result in valid stack variables
+ * being incorrectly cleared on some logic paths. Ignore
+ * stores to stack variables which point to themselves or to
+ * the previous word so the list head initialization is not
+ * recorded.
+ */
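+ /* Illustrative example: INIT_LIST_HEAD on a stack variable stores
+ * &head into head.next (stack1 == stack2) and into head.prev
+ * (stack1 == stack2 - KDB_WORD_SIZE); both stores are ignored by
+ * the test below.
+ */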
+ if (bb_is_osp_defined(src->base_rc)) {
+ int stack1 = bb_reg_code_offset(src->base_rc);
+ int stack2 = bb_reg_code_offset(dst->base_rc) +
+ dst->disp;
+ if (stack1 == stack2 ||
+ stack1 == stack2 - KDB_WORD_SIZE)
+ return BBOU_NOP;
+ }
+ bb_memory_set_reg(dst->base_rc, src->base_rc, dst->disp);
+ return BBOU_NOP;
+ }
+ /* If the move is from stack to a full integer register then record it.
+ */
+ if (bb_is_simple_memory(src) &&
+ bb_is_osp_defined(src->base_rc) &&
+ dst->reg &&
+ bb_is_int_reg(dst->base_rc) &&
+ full_register_dst) {
+#ifdef CONFIG_X86_32
+ /* mov from TSS_sysenter_sp0+offset to esp fixes up the
+ * sysenter stack and leaves esp well defined. mov
+ * TSS_sysenter_sp0+offset(%esp),%esp is followed by up to 5
+ * push instructions to mimic the hardware stack push. If the
+ * displacement is offset from TSS_sysenter_sp0 then only 3
+ * words will be pushed.
+ */
+ if (dst->base_rc == BBRG_RSP &&
+ src->disp >= TSS_sysenter_sp0 &&
+ bb_is_osp_defined(BBRG_RSP)) {
+ int pushes;
+ pushes = src->disp == TSS_sysenter_sp0 ? 5 : 3;
+ bb_reg_code_set_offset(BBRG_RSP,
+ bb_reg_code_offset(BBRG_RSP) +
+ pushes * KDB_WORD_SIZE);
+ KDB_DEBUG_BB_OFFSET(
+ bb_reg_code_offset(BBRG_RSP),
+ " sysenter fixup, RSP",
+ "\n");
+ return BBOU_NOP;
+ }
+#endif /* CONFIG_X86_32 */
+ bb_read_operand(src);
+ bb_reg_set_memory(dst->base_rc, src->base_rc, src->disp);
+ return BBOU_NOP;
+ }
+ /* move %gs:0x<nn>,%rsp is used to unconditionally switch to another
+ * stack. Ignore this special case, it is handled by the stack
+ * unwinding code.
+ */
+ if (src->segment &&
+ strcmp(src->segment, "%gs") == 0 &&
+ dst->reg &&
+ dst->base_rc == BBRG_RSP)
+ return BBOU_NOP;
+ /* move %reg,%reg is a nop */
+ if (src->reg &&
+ dst->reg &&
+ !src->segment &&
+ !dst->segment &&
+ strcmp(src->base, dst->base) == 0)
+ return BBOU_NOP;
+ /* Special case for the code that switches stacks in the scheduler
+ * (switch_to()). That code must modify RSP but it does it in a well
+ * defined manner. Do not invalidate RSP.
+ */
+ if (dst->reg &&
+ dst->base_rc == BBRG_RSP &&
+ full_register_dst &&
+ bb_is_scheduler_address()) {
+ bb_read_operand(src);
+ return BBOU_NOP;
+ }
+ /* Special case for the code that switches stacks in resume from
+ * hibernation code. That code must modify RSP but it does it in a
+ * well defined manner. Do not invalidate RSP.
+ */
+ if (src->memory &&
+ dst->reg &&
+ dst->base_rc == BBRG_RSP &&
+ full_register_dst &&
+ strcmp(bb_func_name, "restore_image") == 0) {
+ bb_read_operand(src);
+ return BBOU_NOP;
+ }
+ return BBOU_RSWD;
+}
+
+static enum bb_operand_usage
+bb_usage_xadd(const struct bb_operand *src, const struct bb_operand *dst)
+{
+ /* Simulate xadd as a series of instructions including mov, that way we
+ * get the benefit of all the special cases already handled by
+ * BBOU_MOV.
+ *
+ * tmp = src + dst, src = dst, dst = tmp.
+ *
+ * For tmp, pick a register that is undefined. If all registers are
+ * defined then pick one that is not being used by xadd.
+ */
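+ /* Illustrative: for "xadd %rcx,%rax" with %rdx undefined, tmp is
+ * %rdx and the simulated sequence is rdx = rcx + rax; rcx = rax;
+ * rax = rdx.
+ */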
+ enum bb_reg_code reg = BBRG_UNDEFINED;
+ struct bb_operand tmp;
+ struct bb_reg_contains save_tmp;
+ enum bb_operand_usage usage;
+ int undefined = 0;
+ for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) {
+ if (bb_reg_code_value(reg) == BBRG_UNDEFINED) {
+ undefined = 1;
+ break;
+ }
+ }
+ if (!undefined) {
+ for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) {
+ if (reg != src->base_rc &&
+ reg != src->index_rc &&
+ reg != dst->base_rc &&
+ reg != dst->index_rc &&
+ reg != BBRG_RSP)
+ break;
+ }
+ }
+ KDB_DEBUG_BB(" %s saving tmp %s\n", __FUNCTION__, bbrg_name[reg]);
+ save_tmp = bb_reg_state->contains[reg - BBRG_RAX];
+ bb_reg_set_undef(reg);
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.present = 1;
+ tmp.reg = 1;
+ tmp.base = debug_kmalloc(strlen(bbrg_name[reg]) + 2, GFP_ATOMIC);
+ if (tmp.base) {
+ tmp.base[0] = '%';
+ strcpy(tmp.base + 1, bbrg_name[reg]);
+ }
+ tmp.base_rc = reg;
+ bb_read_operand(src);
+ bb_read_operand(dst);
+ if (bb_usage_mov(src, dst, sizeof("xadd")-1) == BBOU_NOP)
+ usage = BBOU_RSRD;
+ else
+ usage = BBOU_RSRDWS;
+ bb_usage_mov(&tmp, dst, sizeof("xadd")-1);
+ KDB_DEBUG_BB(" %s restoring tmp %s\n", __FUNCTION__, bbrg_name[reg]);
+ bb_reg_state->contains[reg - BBRG_RAX] = save_tmp;
+ debug_kfree(tmp.base);
+ return usage;
+}
+
+static enum bb_operand_usage
+bb_usage_xchg(const struct bb_operand *src, const struct bb_operand *dst)
+{
+ /* Simulate xchg as a series of mov instructions, that way we get the
+ * benefit of all the special cases already handled by BBOU_MOV.
+ *
+ * mov dst,tmp; mov src,dst; mov tmp,src;
+ *
+ * For tmp, pick a register that is undefined. If all registers are
+ * defined then pick one that is not being used by xchg.
+ */
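+ /* Illustrative: for "xchg %rcx,%rax" with %rdx undefined, tmp is
+ * %rdx and the simulated sequence is mov %rax,%rdx; mov %rcx,%rax;
+ * mov %rdx,%rcx.
+ */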
+ enum bb_reg_code reg = BBRG_UNDEFINED;
+ int rs = BBOU_RS, rd = BBOU_RD, ws = BBOU_WS, wd = BBOU_WD;
+ struct bb_operand tmp;
+ struct bb_reg_contains save_tmp;
+ int undefined = 0;
+ for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) {
+ if (bb_reg_code_value(reg) == BBRG_UNDEFINED) {
+ undefined = 1;
+ break;
+ }
+ }
+ if (!undefined) {
+ for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) {
+ if (reg != src->base_rc &&
+ reg != src->index_rc &&
+ reg != dst->base_rc &&
+ reg != dst->index_rc &&
+ reg != BBRG_RSP)
+ break;
+ }
+ }
+ KDB_DEBUG_BB(" %s saving tmp %s\n", __FUNCTION__, bbrg_name[reg]);
+ save_tmp = bb_reg_state->contains[reg - BBRG_RAX];
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.present = 1;
+ tmp.reg = 1;
+ tmp.base = debug_kmalloc(strlen(bbrg_name[reg]) + 2, GFP_ATOMIC);
+ if (tmp.base) {
+ tmp.base[0] = '%';
+ strcpy(tmp.base + 1, bbrg_name[reg]);
+ }
+ tmp.base_rc = reg;
+ if (bb_usage_mov(dst, &tmp, sizeof("xchg")-1) == BBOU_NOP)
+ rd = 0;
+ if (bb_usage_mov(src, dst, sizeof("xchg")-1) == BBOU_NOP) {
+ rs = 0;
+ wd = 0;
+ }
+ if (bb_usage_mov(&tmp, src, sizeof("xchg")-1) == BBOU_NOP)
+ ws = 0;
+ KDB_DEBUG_BB(" %s restoring tmp %s\n", __FUNCTION__, bbrg_name[reg]);
+ bb_reg_state->contains[reg - BBRG_RAX] = save_tmp;
+ debug_kfree(tmp.base);
+ return rs | rd | ws | wd;
+}
+
+/* Invalidate all the scratch registers */
+
+static void
+bb_invalidate_scratch_reg(void)
+{
+ int i, j;
+ for (i = BBRG_RAX; i < BBRG_RAX + KDB_INT_REGISTERS; ++i) {
+ for (j = 0; j < ARRAY_SIZE(bb_preserved_reg); ++j) {
+ if (i == bb_preserved_reg[j])
+ goto preserved;
+ }
+ bb_reg_set_undef(i);
+preserved:
+ continue;
+ }
+}
+
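+/* Walk the jump table of a computed jmp. src->disp points at the table; read
+ * one code pointer per KDB_WORD_SIZE slot and record each in-function target
+ * as a transfer arc. The table length is not encoded anywhere, so stop at the
+ * first entry that falls outside the current function.
+ */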
+static void
+bb_pass2_computed_jmp(const struct bb_operand *src)
+{
+ unsigned long table = src->disp;
+ kdb_machreg_t addr;
+ while (!bb_giveup) {
+ if (kdb_getword(&addr, table, sizeof(addr)))
+ return;
+ if (addr < bb_func_start || addr >= bb_func_end)
+ return;
+ bb_transfer(bb_curr_addr, addr, 0);
+ table += KDB_WORD_SIZE;
+ }
+}
+
+/* The current instruction has been decoded and all the information is in
+ * bb_decode. Based on the opcode, track any operand usage that we care about.
+ */
+
+static void
+bb_usage(void)
+{
+ enum bb_operand_usage usage = bb_decode.match->usage;
+ struct bb_operand *src = &bb_decode.src;
+ struct bb_operand *dst = &bb_decode.dst;
+ struct bb_operand *dst2 = &bb_decode.dst2;
+ int opcode_suffix, operand_length;
+
+ /* First handle all the special usage cases, and map them to a generic
+ * case after catering for the side effects.
+ */
+
+ if (usage == BBOU_IMUL &&
+ src->present && !dst->present && !dst2->present) {
+ /* single operand imul, same effects as mul */
+ usage = BBOU_MUL;
+ }
+
+ /* AT&T syntax uses movs<l1><l2> for move with sign extension, instead
+ * of the Intel movsx. The AT&T syntax causes problems for the opcode
+ * mapping; movs with sign extension needs to be treated as a generic
+ * read src, write dst, but instead it falls under the movs I/O
+ * instruction. Fix it.
+ */
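+ /* e.g. "movswl %ax,%ebx" has opcode length 6 (sign extension) while
+ * the string instruction is plain "movsw" with length 5, hence the
+ * length test below.
+ */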
+ if (usage == BBOU_MOVS && strlen(bb_decode.opcode) > 5)
+ usage = BBOU_RSWD;
+
+ /* This switch statement deliberately does not use 'default' at the top
+ * level. That way the compiler will complain if a new BBOU_ enum is
+ * added above and not explicitly handled here.
+ */
+ switch (usage) {
+ case BBOU_UNKNOWN: /* drop through */
+ case BBOU_RS: /* drop through */
+ case BBOU_RD: /* drop through */
+ case BBOU_RSRD: /* drop through */
+ case BBOU_WS: /* drop through */
+ case BBOU_RSWS: /* drop through */
+ case BBOU_RDWS: /* drop through */
+ case BBOU_RSRDWS: /* drop through */
+ case BBOU_WD: /* drop through */
+ case BBOU_RSWD: /* drop through */
+ case BBOU_RDWD: /* drop through */
+ case BBOU_RSRDWD: /* drop through */
+ case BBOU_WSWD: /* drop through */
+ case BBOU_RSWSWD: /* drop through */
+ case BBOU_RDWSWD: /* drop through */
+ case BBOU_RSRDWSWD:
+ break; /* ignore generic usage for now */
+ case BBOU_ADD:
+ /* Special case for add instructions that adjust registers
+ * which are mapping the stack.
+ */
+ if (dst->reg && bb_is_osp_defined(dst->base_rc)) {
+ bb_adjust_osp_instruction(1);
+ usage = BBOU_RS;
+ } else {
+ usage = BBOU_RSRDWD;
+ }
+ break;
+ case BBOU_AND:
+ /* Special case when trying to round the stack pointer
+ * to achieve byte alignment
+ */
+ if (dst->reg && dst->base_rc == BBRG_RSP &&
+ src->immediate && strncmp(bb_func_name, "efi_call", 8) == 0) {
+ usage = BBOU_NOP;
+ } else {
+ usage = BBOU_RSRDWD;
+ }
+ break;
+ case BBOU_CALL:
+ bb_reg_state_print(bb_reg_state);
+ usage = BBOU_NOP;
+ if (bb_is_static_disp(src)) {
+ /* save_args is special. It saves
+ * a partial pt_regs onto the stack and switches
+ * to the interrupt stack.
+ */
+ if (src->disp == bb_save_args) {
+ bb_memory_set_reg(BBRG_RSP, BBRG_RDI, 0x48);
+ bb_memory_set_reg(BBRG_RSP, BBRG_RSI, 0x40);
+ bb_memory_set_reg(BBRG_RSP, BBRG_RDX, 0x38);
+ bb_memory_set_reg(BBRG_RSP, BBRG_RCX, 0x30);
+ bb_memory_set_reg(BBRG_RSP, BBRG_RAX, 0x28);
+ bb_memory_set_reg(BBRG_RSP, BBRG_R8, 0x20);
+ bb_memory_set_reg(BBRG_RSP, BBRG_R9, 0x18);
+ bb_memory_set_reg(BBRG_RSP, BBRG_R10, 0x10);
+ bb_memory_set_reg(BBRG_RSP, BBRG_R11, 0x08);
+ bb_memory_set_reg(BBRG_RSP, BBRG_RBP, 0);
+ /* This is actually on the interrupt stack,
+ * but we fudge it so the unwind works.
+ */
+ bb_memory_set_reg_value(BBRG_RSP, -0x8, BBRG_RBP, 0);
+ bb_reg_set_reg(BBRG_RBP, BBRG_RSP);
+ bb_adjust_osp(BBRG_RSP, -KDB_WORD_SIZE);
+ }
+ /* save_rest juggles the stack frame to append the
+ * rest of the pt_regs onto a stack where SAVE_ARGS
+ * or save_args has already been done.
+ */
+ else if (src->disp == bb_save_rest) {
+ bb_memory_set_reg(BBRG_RSP, BBRG_RBX, 0x30);
+ bb_memory_set_reg(BBRG_RSP, BBRG_RBP, 0x28);
+ bb_memory_set_reg(BBRG_RSP, BBRG_R12, 0x20);
+ bb_memory_set_reg(BBRG_RSP, BBRG_R13, 0x18);
+ bb_memory_set_reg(BBRG_RSP, BBRG_R14, 0x10);
+ bb_memory_set_reg(BBRG_RSP, BBRG_R15, 0x08);
+ }
+ /* error_entry and save_paranoid save a full pt_regs.
+ * Break out so the scratch registers aren't invalidated.
+ */
+ else if (src->disp == bb_error_entry || src->disp == bb_save_paranoid) {
+ bb_memory_set_reg(BBRG_RSP, BBRG_RDI, 0x70);
+ bb_memory_set_reg(BBRG_RSP, BBRG_RSI, 0x68);
+ bb_memory_set_reg(BBRG_RSP, BBRG_RDX, 0x60);
+ bb_memory_set_reg(BBRG_RSP, BBRG_RCX, 0x58);
+ bb_memory_set_reg(BBRG_RSP, BBRG_RAX, 0x50);
+ bb_memory_set_reg(BBRG_RSP, BBRG_R8, 0x48);
+ bb_memory_set_reg(BBRG_RSP, BBRG_R9, 0x40);
+ bb_memory_set_reg(BBRG_RSP, BBRG_R10, 0x38);
+ bb_memory_set_reg(BBRG_RSP, BBRG_R11, 0x30);
+ bb_memory_set_reg(BBRG_RSP, BBRG_RBX, 0x28);
+ bb_memory_set_reg(BBRG_RSP, BBRG_RBP, 0x20);
+ bb_memory_set_reg(BBRG_RSP, BBRG_R12, 0x18);
+ bb_memory_set_reg(BBRG_RSP, BBRG_R13, 0x10);
+ bb_memory_set_reg(BBRG_RSP, BBRG_R14, 0x08);
+ bb_memory_set_reg(BBRG_RSP, BBRG_R15, 0);
+ break;
+ }
+ }
+ /* Invalidate the scratch registers */
+ bb_invalidate_scratch_reg();
+
+ /* These special cases need scratch registers invalidated first */
+ if (bb_is_static_disp(src)) {
+ /* Function sync_regs and save_v86_state are special.
+ * Their return value is the new stack pointer
+ */
+ if (src->disp == bb_sync_regs) {
+ bb_reg_set_reg(BBRG_RAX, BBRG_RSP);
+ } else if (src->disp == bb_save_v86_state) {
+ bb_reg_set_reg(BBRG_RAX, BBRG_RSP);
+ bb_adjust_osp(BBRG_RAX, +KDB_WORD_SIZE);
+ }
+ }
+ break;
+ case BBOU_CBW:
+ /* Convert word in RAX. Read RAX, write RAX */
+ bb_reg_read(BBRG_RAX);
+ bb_reg_set_undef(BBRG_RAX);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_CMOV:
+ /* cmove %gs:0x<nn>,%rsp is used to conditionally switch to
+ * another stack. Ignore this special case, it is handled by
+ * the stack unwinding code.
+ */
+ if (src->segment &&
+ strcmp(src->segment, "%gs") == 0 &&
+ dst->reg &&
+ dst->base_rc == BBRG_RSP)
+ usage = BBOU_NOP;
+ else
+ usage = BBOU_RSWD;
+ break;
+ case BBOU_CMPXCHG:
+ /* Read RAX, write RAX plus src read, dst write */
+ bb_reg_read(BBRG_RAX);
+ bb_reg_set_undef(BBRG_RAX);
+ usage = BBOU_RSWD;
+ break;
+ case BBOU_CMPXCHGD:
+ /* Read RAX, RBX, RCX, RDX, write RAX, RDX plus src read/write */
+ bb_reg_read(BBRG_RAX);
+ bb_reg_read(BBRG_RBX);
+ bb_reg_read(BBRG_RCX);
+ bb_reg_read(BBRG_RDX);
+ bb_reg_set_undef(BBRG_RAX);
+ bb_reg_set_undef(BBRG_RDX);
+ usage = BBOU_RSWS;
+ break;
+ case BBOU_CPUID:
+ /* Read RAX, write RAX, RBX, RCX, RDX */
+ bb_reg_read(BBRG_RAX);
+ bb_reg_set_undef(BBRG_RAX);
+ bb_reg_set_undef(BBRG_RBX);
+ bb_reg_set_undef(BBRG_RCX);
+ bb_reg_set_undef(BBRG_RDX);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_CWD:
+ /* Convert word in RAX, RDX. Read RAX, write RDX */
+ bb_reg_read(BBRG_RAX);
+ bb_reg_set_undef(BBRG_RDX);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_DIV: /* drop through */
+ case BBOU_IDIV:
+ /* The 8 bit variants only affect RAX, the 16, 32 and 64 bit
+ * variants affect RDX as well.
+ */
+ switch (usage) {
+ case BBOU_DIV:
+ opcode_suffix = bb_decode.opcode[3];
+ break;
+ case BBOU_IDIV:
+ opcode_suffix = bb_decode.opcode[4];
+ break;
+ default:
+ opcode_suffix = 'q';
+ break;
+ }
+ operand_length = bb_operand_length(src, opcode_suffix);
+ bb_reg_read(BBRG_RAX);
+ bb_reg_set_undef(BBRG_RAX);
+ if (operand_length != 8) {
+ bb_reg_read(BBRG_RDX);
+ bb_reg_set_undef(BBRG_RDX);
+ }
+ usage = BBOU_RS;
+ break;
+ case BBOU_IMUL:
+ /* Only the two and three operand forms get here. The one
+ * operand form is treated as mul.
+ */
+ if (dst2->present) {
+ /* The three operand form is a special case, read the first two
+ * operands, write the third.
+ */
+ bb_read_operand(src);
+ bb_read_operand(dst);
+ bb_write_operand(dst2);
+ usage = BBOU_NOP;
+ } else {
+ usage = BBOU_RSRDWD;
+ }
+ break;
+ case BBOU_IRET:
+ bb_sanity_check(0);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_JMP:
+ if (bb_is_static_disp(src))
+ bb_transfer(bb_curr_addr, src->disp, 0);
+ else if (src->indirect &&
+ src->disp &&
+ src->base == NULL &&
+ src->index &&
+ src->scale == KDB_WORD_SIZE)
+ bb_pass2_computed_jmp(src);
+ usage = BBOU_RS;
+ break;
+ case BBOU_LAHF:
+ /* Write RAX */
+ bb_reg_set_undef(BBRG_RAX);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_LEA:
+ /* dst = src + disp. Often used to calculate offsets into the
+ * stack, so check if it uses a stack pointer.
+ */
+ usage = BBOU_RSWD;
+ if (bb_is_simple_memory(src)) {
+ if (bb_is_osp_defined(src->base_rc)) {
+ bb_reg_set_reg(dst->base_rc, src->base_rc);
+ bb_adjust_osp_instruction(1);
+ usage = BBOU_RS;
+ } else if (src->disp == 0 &&
+ src->base_rc == dst->base_rc) {
+ /* lea 0(%reg),%reg is generated by i386
+ * GENERIC_NOP7.
+ */
+ usage = BBOU_NOP;
+ } else if (src->disp == 4096 &&
+ (src->base_rc == BBRG_R8 ||
+ src->base_rc == BBRG_RDI) &&
+ strcmp(bb_func_name, "relocate_kernel") == 0) {
+ /* relocate_kernel: setup a new stack at the
+ * end of the physical control page, using
+ * (x86_64) lea 4096(%r8),%rsp or (i386) lea
+ * 4096(%edi),%esp
+ */
+ usage = BBOU_NOP;
+ }
+ }
+ break;
+ case BBOU_LEAVE:
+ /* RSP = RBP; RBP = *(RSP); RSP += KDB_WORD_SIZE; */
+ bb_reg_set_reg(BBRG_RSP, BBRG_RBP);
+ if (bb_is_osp_defined(BBRG_RSP))
+ bb_reg_set_memory(BBRG_RBP, BBRG_RSP, 0);
+ else
+ bb_reg_set_undef(BBRG_RBP);
+ if (bb_is_osp_defined(BBRG_RSP))
+ bb_adjust_osp(BBRG_RSP, KDB_WORD_SIZE);
+ /* common_interrupt uses leave in a non-standard manner */
+ if (strcmp(bb_func_name, "common_interrupt") != 0)
+ bb_sanity_check(0);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_LODS:
+ /* Read RSI, write RAX, RSI */
+ bb_reg_read(BBRG_RSI);
+ bb_reg_set_undef(BBRG_RAX);
+ bb_reg_set_undef(BBRG_RSI);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_LOOP:
+ /* Read and write RCX */
+ bb_reg_read(BBRG_RCX);
+ bb_reg_set_undef(BBRG_RCX);
+ if (bb_is_static_disp(src))
+ bb_transfer(bb_curr_addr, src->disp, 0);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_LSS:
+ /* lss offset(%esp),%esp leaves esp well defined */
+ if (dst->reg &&
+ dst->base_rc == BBRG_RSP &&
+ bb_is_simple_memory(src) &&
+ src->base_rc == BBRG_RSP) {
+ bb_adjust_osp(BBRG_RSP, 2*KDB_WORD_SIZE + src->disp);
+ usage = BBOU_NOP;
+ } else {
+ usage = BBOU_RSWD;
+ }
+ break;
+ case BBOU_MONITOR:
+ /* Read RAX, RCX, RDX */
+ bb_reg_read(BBRG_RAX);
+ bb_reg_read(BBRG_RCX);
+ bb_reg_read(BBRG_RDX);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_MOV:
+ usage = bb_usage_mov(src, dst, sizeof("mov")-1);
+ break;
+ case BBOU_MOVS:
+ /* Read RSI, RDI, write RSI, RDI */
+ bb_reg_read(BBRG_RSI);
+ bb_reg_read(BBRG_RDI);
+ bb_reg_set_undef(BBRG_RSI);
+ bb_reg_set_undef(BBRG_RDI);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_MUL:
+ /* imul (one operand form only) or mul. Read RAX. If the
+ * operand length is not 8 then write RDX.
+ */
+ if (bb_decode.opcode[0] == 'i')
+ opcode_suffix = bb_decode.opcode[4];
+ else
+ opcode_suffix = bb_decode.opcode[3];
+ operand_length = bb_operand_length(src, opcode_suffix);
+ bb_reg_read(BBRG_RAX);
+ if (operand_length != 8)
+ bb_reg_set_undef(BBRG_RDX);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_MWAIT:
+ /* Read RAX, RCX */
+ bb_reg_read(BBRG_RAX);
+ bb_reg_read(BBRG_RCX);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_NOP:
+ break;
+ case BBOU_OUTS:
+ /* Read RSI, RDX, write RSI */
+ bb_reg_read(BBRG_RSI);
+ bb_reg_read(BBRG_RDX);
+ bb_reg_set_undef(BBRG_RSI);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_POP:
+ /* Complicated by the fact that you can pop from top of stack
+ * to a stack location, for this case the destination location
+ * is calculated after adjusting RSP. Analysis of the kernel
+ * code shows that gcc only uses this strange format to get the
+ * flags into a local variable, e.g. pushf; popl 0x10(%esp); so
+ * I am going to ignore this special case.
+ */
+ usage = BBOU_WS;
+ if (!bb_is_osp_defined(BBRG_RSP)) {
+ if (!bb_is_scheduler_address()) {
+ kdb_printf("pop when BBRG_RSP is undefined?\n");
+ bb_giveup = 1;
+ }
+ } else {
+ if (src->reg) {
+ bb_reg_set_memory(src->base_rc, BBRG_RSP, 0);
+ usage = BBOU_NOP;
+ }
+ /* pop %rsp does not adjust rsp */
+ if (!src->reg ||
+ src->base_rc != BBRG_RSP)
+ bb_adjust_osp(BBRG_RSP, KDB_WORD_SIZE);
+ }
+ break;
+ case BBOU_POPF:
+ /* Do not care about flags, just adjust RSP */
+ if (!bb_is_osp_defined(BBRG_RSP)) {
+ if (!bb_is_scheduler_address()) {
+ kdb_printf("popf when BBRG_RSP is undefined?\n");
+ bb_giveup = 1;
+ }
+ } else {
+ bb_adjust_osp(BBRG_RSP, KDB_WORD_SIZE);
+ }
+ usage = BBOU_WS;
+ break;
+ case BBOU_PUSH:
+ /* Complicated by the fact that you can push from a stack
+ * location to top of stack, the source location is calculated
+ * before adjusting RSP. Analysis of the kernel code shows
+ * that gcc only uses this strange format to restore the flags
+ * from a local variable, e.g. pushl 0x10(%esp); popf; so I am
+ * going to ignore this special case.
+ */
+ usage = BBOU_RS;
+ if (!bb_is_osp_defined(BBRG_RSP)) {
+ if (!bb_is_scheduler_address()) {
+ kdb_printf("push when BBRG_RSP is undefined?\n");
+ bb_giveup = 1;
+ }
+ } else {
+ bb_adjust_osp(BBRG_RSP, -KDB_WORD_SIZE);
+ if (src->reg &&
+ bb_reg_code_offset(BBRG_RSP) <= 0)
+ bb_memory_set_reg(BBRG_RSP, src->base_rc, 0);
+ }
+ break;
+ case BBOU_PUSHF:
+ /* Do not care about flags, just adjust RSP */
+ if (!bb_is_osp_defined(BBRG_RSP)) {
+ if (!bb_is_scheduler_address()) {
+ kdb_printf("pushf when BBRG_RSP is undefined?\n");
+ bb_giveup = 1;
+ }
+ } else {
+ bb_adjust_osp(BBRG_RSP, -KDB_WORD_SIZE);
+ }
+ usage = BBOU_WS;
+ break;
+ case BBOU_RDMSR:
+ /* Read RCX, write RAX, RDX */
+ bb_reg_read(BBRG_RCX);
+ bb_reg_set_undef(BBRG_RAX);
+ bb_reg_set_undef(BBRG_RDX);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_RDTSC:
+ /* Write RAX, RDX */
+ bb_reg_set_undef(BBRG_RAX);
+ bb_reg_set_undef(BBRG_RDX);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_RET:
+ usage = BBOU_NOP;
+ if (src->immediate && bb_is_osp_defined(BBRG_RSP)) {
+ bb_adjust_osp(BBRG_RSP, src->disp);
+ }
+ /* Functions that restore state which was saved by another
+ * function or build new kernel stacks. We cannot verify what
+ * is being restored so skip the sanity check.
+ */
+ if (strcmp(bb_func_name, "restore_image") == 0 ||
+ strcmp(bb_func_name, "relocate_kernel") == 0 ||
+ strcmp(bb_func_name, "identity_mapped") == 0 ||
+ strcmp(bb_func_name, "xen_iret_crit_fixup") == 0 ||
+ strcmp(bb_func_name, "math_abort") == 0 ||
+ strcmp(bb_func_name, "save_args") == 0 ||
+ strcmp(bb_func_name, "kretprobe_trampoline_holder") == 0)
+ break;
+ bb_sanity_check(0);
+ break;
+ case BBOU_SAHF:
+ /* Read RAX */
+ bb_reg_read(BBRG_RAX);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_SCAS:
+ /* Read RAX, RDI, write RDI */
+ bb_reg_read(BBRG_RAX);
+ bb_reg_read(BBRG_RDI);
+ bb_reg_set_undef(BBRG_RDI);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_SUB:
+ /* Special case for sub instructions that adjust registers
+ * which are mapping the stack.
+ */
+ if (dst->reg && bb_is_osp_defined(dst->base_rc)) {
+ bb_adjust_osp_instruction(-1);
+ usage = BBOU_RS;
+ } else {
+ usage = BBOU_RSRDWD;
+ }
+ break;
+ case BBOU_SYSEXIT:
+ bb_sanity_check(1);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_SYSRET:
+ bb_sanity_check(1);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_WRMSR:
+ /* Read RCX, RAX, RDX */
+ bb_reg_read(BBRG_RCX);
+ bb_reg_read(BBRG_RAX);
+ bb_reg_read(BBRG_RDX);
+ usage = BBOU_NOP;
+ break;
+ case BBOU_XADD:
+ usage = bb_usage_xadd(src, dst);
+ break;
+ case BBOU_XCHG:
+ /* i386 do_IRQ with 4K stacks does xchg %ebx,%esp; call
+ * irq_handler; mov %ebx,%esp; to switch stacks. Ignore this
+ * stack switch when tracking registers, it is handled by
+ * higher level backtrace code. Convert xchg %ebx,%esp to mov
+ * %esp,%ebx so the later mov %ebx,%esp becomes a NOP and the
+ * stack remains defined so we can backtrace through do_IRQ's
+ * stack switch.
+ *
+ * Ditto for do_softirq.
+ */
+ if (src->reg &&
+ dst->reg &&
+ src->base_rc == BBRG_RBX &&
+ dst->base_rc == BBRG_RSP &&
+ (strcmp(bb_func_name, "do_IRQ") == 0 ||
+ strcmp(bb_func_name, "do_softirq") == 0)) {
+ strcpy(bb_decode.opcode, "mov");
+ usage = bb_usage_mov(dst, src, sizeof("mov")-1);
+ } else {
+ usage = bb_usage_xchg(src, dst);
+ }
+ break;
+ case BBOU_XOR:
+ /* xor %reg,%reg only counts as a register write, the original
+ * contents of reg are irrelevant.
+ */
+ if (src->reg && dst->reg && src->base_rc == dst->base_rc)
+ usage = BBOU_WS;
+ else
+ usage = BBOU_RSRDWD;
+ break;
+ }
+
+ /* The switch statement above handled all the special cases. Every
+ * opcode should now have a usage of NOP or one of the generic cases.
+ */
+ if (usage == BBOU_UNKNOWN || usage == BBOU_NOP) {
+ /* nothing to do */
+ } else if (usage >= BBOU_RS && usage <= BBOU_RSRDWSWD) {
+ if (usage & BBOU_RS)
+ bb_read_operand(src);
+ if (usage & BBOU_RD)
+ bb_read_operand(dst);
+ if (usage & BBOU_WS)
+ bb_write_operand(src);
+ if (usage & BBOU_WD)
+ bb_write_operand(dst);
+ } else {
+ kdb_printf("%s: opcode not fully handled\n", __FUNCTION__);
+ if (!KDB_DEBUG(BB)) {
+ bb_print_opcode();
+ if (bb_decode.src.present)
+ bb_print_operand("src", &bb_decode.src);
+ if (bb_decode.dst.present)
+ bb_print_operand("dst", &bb_decode.dst);
+ if (bb_decode.dst2.present)
+ bb_print_operand("dst2", &bb_decode.dst2);
+ }
+ bb_giveup = 1;
+ }
+}
+
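+/* Parse one complete line of disassembler output from bb_buffer. A typical
+ * line (illustrative) is:
+ *   0xffffffff80212345 <schedule+0x45>: mov %rsp,%rbp
+ * i.e. address and symbol up to the ':', an optional prefix, the opcode, then
+ * up to three comma separated operands.
+ */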
+static void
+bb_parse_buffer(void)
+{
+ char *p, *src, *dst = NULL, *dst2 = NULL;
+ int paren = 0;
+ p = bb_buffer;
+ memset(&bb_decode, 0, sizeof(bb_decode));
+ KDB_DEBUG_BB(" '%s'\n", p);
+ p += strcspn(p, ":"); /* skip address and function name+offset: */
+ if (*p++ != ':') {
+ kdb_printf("%s: cannot find ':' in buffer '%s'\n",
+ __FUNCTION__, bb_buffer);
+ bb_giveup = 1;
+ return;
+ }
+ p += strspn(p, " \t"); /* step to opcode */
+ if (strncmp(p, "(bad)", 5) == 0)
+ strcpy(p, "nop");
+ /* separate any opcode prefix */
+ if (strncmp(p, "lock", 4) == 0 ||
+ strncmp(p, "rep", 3) == 0 ||
+ strncmp(p, "rex", 3) == 0 ||
+ strncmp(p, "addr", 4) == 0) {
+ bb_decode.prefix = p;
+ p += strcspn(p, " \t");
+ *p++ = '\0';
+ p += strspn(p, " \t");
+ }
+ bb_decode.opcode = p;
+ strsep(&p, " \t"); /* step to end of opcode */
+ if (bb_parse_opcode())
+ return;
+ if (!p)
+ goto no_operands;
+ p += strspn(p, " \t"); /* step to operand(s) */
+ if (!*p)
+ goto no_operands;
+ src = p;
+ p = strsep(&p, " \t"); /* strip comments after operands */
+ /* split 'src','dst' but ignore ',' inside '(' ')' */
+ while (*p) {
+ if (*p == '(') {
+ ++paren;
+ } else if (*p == ')') {
+ --paren;
+ } else if (*p == ',' && paren == 0) {
+ *p = '\0';
+ if (dst)
+ dst2 = p+1;
+ else
+ dst = p+1;
+ }
+ ++p;
+ }
+ bb_parse_operand(src, &bb_decode.src);
+ if (KDB_DEBUG(BB))
+ bb_print_operand("src", &bb_decode.src);
+ if (dst && !bb_giveup) {
+ bb_parse_operand(dst, &bb_decode.dst);
+ if (KDB_DEBUG(BB))
+ bb_print_operand("dst", &bb_decode.dst);
+ }
+ if (dst2 && !bb_giveup) {
+ bb_parse_operand(dst2, &bb_decode.dst2);
+ if (KDB_DEBUG(BB))
+ bb_print_operand("dst2", &bb_decode.dst2);
+ }
+no_operands:
+ if (!bb_giveup)
+ bb_usage();
+}
+
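+/* fprintf style callback for pass 2 of the disassembly. Fragments are
+ * accumulated in bb_buffer until a newline arrives, then the completed line
+ * is fixed up and parsed.
+ */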
+static int
+bb_dis_pass2(PTR file, const char *fmt, ...)
+{
+ char *p;
+ int l = strlen(bb_buffer);
+ va_list ap;
+ va_start(ap, fmt);
+ vsnprintf(bb_buffer + l, sizeof(bb_buffer) - l, fmt, ap);
+ va_end(ap);
+ if ((p = strchr(bb_buffer, '\n'))) {
+ *p = '\0';
+ p = bb_buffer;
+ p += strcspn(p, ":");
+ if (*p++ == ':')
+ bb_fixup_switch_to(p);
+ bb_parse_buffer();
+ bb_buffer[0] = '\0';
+ }
+ return 0;
+}
+
+static void
+bb_printaddr_pass2(bfd_vma addr, disassemble_info *dip)
+{
+ kdb_symtab_t symtab;
+ unsigned int offset;
+ dip->fprintf_func(dip->stream, "0x%lx", addr);
+ kdbnearsym(addr, &symtab);
+ if (symtab.sym_name) {
+ dip->fprintf_func(dip->stream, " <%s", symtab.sym_name);
+ if ((offset = addr - symtab.sym_start))
+ dip->fprintf_func(dip->stream, "+0x%x", offset);
+ dip->fprintf_func(dip->stream, ">");
+ }
+}
+
+/* Set the starting register and memory state for the current bb */
+
+static void
+bb_start_block0_special(void)
+{
+ int i;
+ short offset_address;
+ enum bb_reg_code reg, value;
+ struct bb_name_state *r;
+ for (i = 0, r = bb_special_cases;
+ i < ARRAY_SIZE(bb_special_cases);
+ ++i, ++r) {
+ if (bb_func_start == r->address && r->fname == NULL)
+ goto match;
+ }
+ return;
+match:
+ /* Set the running registers */
+ for (reg = BBRG_RAX; reg < r->regs_size; ++reg) {
+ value = r->regs[reg].value;
+ if (test_bit(value, r->skip_regs.bits)) {
+ /* this regs entry is not defined for this label */
+ continue;
+ }
+ bb_reg_code_set_value(reg, value);
+ bb_reg_code_set_offset(reg, r->regs[reg].offset);
+ }
+ /* Set any memory contents, e.g. pt_regs. Adjust RSP as required. */
+ offset_address = 0;
+ for (i = 0; i < r->mem_size; ++i) {
+ offset_address = max_t(int,
+ r->mem[i].offset_address + KDB_WORD_SIZE,
+ offset_address);
+ }
+ if (bb_reg_code_offset(BBRG_RSP) > -offset_address)
+ bb_adjust_osp(BBRG_RSP, -offset_address - bb_reg_code_offset(BBRG_RSP));
+ for (i = 0; i < r->mem_size; ++i) {
+ value = r->mem[i].value;
+ if (test_bit(value, r->skip_mem.bits)) {
+ /* this memory entry is not defined for this label */
+ continue;
+ }
+ bb_memory_set_reg_value(BBRG_RSP, r->mem[i].offset_address,
+ value, 0);
+ bb_reg_set_undef(value);
+ }
+ return;
+}
+
+static void
+bb_pass2_start_block(int number)
+{
+ int i, j, k, first, changed;
+ size_t size;
+ struct bb_jmp *bb_jmp;
+ struct bb_reg_state *state;
+ struct bb_memory_contains *c1, *c2;
+ bb_reg_state->mem_count = bb_reg_state_max;
+ size = bb_reg_state_size(bb_reg_state);
+ memset(bb_reg_state, 0, size);
+
+ if (number == 0) {
+ /* The first block is assumed to have well defined inputs */
+ bb_start_block0();
+ /* Some assembler labels have non-standard entry
+ * states.
+ */
+ bb_start_block0_special();
+ bb_reg_state_print(bb_reg_state);
+ return;
+ }
+
+ /* Merge all the input states for the current bb together */
+ first = 1;
+ changed = 0;
+ for (i = 0; i < bb_jmp_count; ++i) {
+ bb_jmp = bb_jmp_list + i;
+ if (bb_jmp->to != bb_curr->start)
+ continue;
+ state = bb_jmp->state;
+ if (!state)
+ continue;
+ if (first) {
+ size = bb_reg_state_size(state);
+ memcpy(bb_reg_state, state, size);
+ KDB_DEBUG_BB(" first state %p\n", state);
+ bb_reg_state_print(bb_reg_state);
+ first = 0;
+ continue;
+ }
+
+ KDB_DEBUG_BB(" merging state %p\n", state);
+ /* Merge the register states */
+ for (j = 0; j < ARRAY_SIZE(state->contains); ++j) {
+ if (memcmp(bb_reg_state->contains + j,
+ state->contains + j,
+ sizeof(bb_reg_state->contains[0]))) {
+ /* Different states for this register from two
+ * or more inputs, make it undefined.
+ */
+ if (bb_reg_state->contains[j].value ==
+ BBRG_UNDEFINED) {
+ KDB_DEBUG_BB(" ignoring %s\n",
+ bbrg_name[j + BBRG_RAX]);
+ } else {
+ bb_reg_set_undef(BBRG_RAX + j);
+ changed = 1;
+ }
+ }
+ }
+
+ /* Merge the memory states. This relies on both
+ * bb_reg_state->memory and state->memory being sorted in
+ * descending order, with undefined entries at the end.
+ */
+ c1 = bb_reg_state->memory;
+ c2 = state->memory;
+ j = k = 0;
+ while (j < bb_reg_state->mem_count &&
+ k < state->mem_count) {
+ if (c1->offset_address < c2->offset_address) {
+ KDB_DEBUG_BB_OFFSET(c2->offset_address,
+ " ignoring c2->offset_address ",
+ "\n");
+ ++c2;
+ ++k;
+ continue;
+ }
+ if (c1->offset_address > c2->offset_address) {
+ /* Memory location is not in all input states,
+ * delete the memory location.
+ */
+ bb_delete_memory(c1->offset_address);
+ changed = 1;
+ ++c1;
+ ++j;
+ continue;
+ }
+ if (memcmp(c1, c2, sizeof(*c1))) {
+ /* Same location, different contents, delete
+ * the memory location.
+ */
+ bb_delete_memory(c1->offset_address);
+ KDB_DEBUG_BB_OFFSET(c2->offset_address,
+ " ignoring c2->offset_address ",
+ "\n");
+ changed = 1;
+ }
+ ++c1;
+ ++c2;
+ ++j;
+ ++k;
+ }
+ while (j < bb_reg_state->mem_count) {
+ bb_delete_memory(c1->offset_address);
+ changed = 1;
+ ++c1;
+ ++j;
+ }
+ }
+ if (changed) {
+ KDB_DEBUG_BB(" final state\n");
+ bb_reg_state_print(bb_reg_state);
+ }
+}
+
+/* We have reached the exit point from the current function, either a call to
+ * the next function or the instruction that was about to be executed when an
+ * interrupt occurred. Save the current register state in bb_exit_state.
+ */
+
+static void
+bb_save_exit_state(void)
+{
+ size_t size;
+ debug_kfree(bb_exit_state);
+ bb_exit_state = NULL;
+ bb_reg_state_canonicalize();
+ size = bb_reg_state_size(bb_reg_state);
+ bb_exit_state = debug_kmalloc(size, GFP_ATOMIC);
+ if (!bb_exit_state) {
+ kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
+ bb_giveup = 1;
+ return;
+ }
+ memcpy(bb_exit_state, bb_reg_state, size);
+}
+
+static int
+bb_pass2_do_changed_blocks(int allow_missing)
+{
+ int i, j, missing, changed, maxloops;
+ unsigned long addr;
+ struct bb_jmp *bb_jmp;
+ KDB_DEBUG_BB("\n %s: allow_missing %d\n", __FUNCTION__, allow_missing);
+ /* Absolute worst case is we have to iterate over all the basic blocks
+ * in an "out of order" state, each iteration losing one register or
+ * memory state. Any more loops than that is a bug. "out of order"
+ * means that the layout of blocks in memory does not match the logic
+ * flow through those blocks so (for example) block 27 comes before
+ * block 2. To allow for out of order blocks, multiply maxloops by the
+ * number of blocks.
+ */
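+ /* Illustrative: with 16 integer registers, 100 tracked memory slots
+ * and 30 blocks, maxloops is (16 + 100) * 30 = 3480 iterations.
+ */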
+ maxloops = (KDB_INT_REGISTERS + bb_reg_state_max) * bb_count;
+ changed = 1;
+ do {
+ changed = 0;
+ for (i = 0; i < bb_count; ++i) {
+ bb_curr = bb_list[i];
+ if (!bb_curr->changed)
+ continue;
+ missing = 0;
+ for (j = 0, bb_jmp = bb_jmp_list;
+ j < bb_jmp_count;
+ ++j, ++bb_jmp) {
+ if (bb_jmp->to == bb_curr->start &&
+ !bb_jmp->state)
+ ++missing;
+ }
+ if (missing > allow_missing)
+ continue;
+ bb_curr->changed = 0;
+ changed = 1;
+ KDB_DEBUG_BB("\n bb[%d]\n", i);
+ bb_pass2_start_block(i);
+ for (addr = bb_curr->start;
+ addr <= bb_curr->end; ) {
+ bb_curr_addr = addr;
+ if (addr == bb_exit_addr)
+ bb_save_exit_state();
+ addr += kdba_id_printinsn(addr, &kdb_di);
+ kdb_di.fprintf_func(NULL, "\n");
+ if (bb_giveup)
+ goto done;
+ }
+ if (!bb_exit_state) {
+ /* ATTRIB_NORET functions are a problem with
+ * the current gcc. Allow the trailing address
+ * a bit of leeway.
+ */
+ if (addr == bb_exit_addr ||
+ addr == bb_exit_addr + 1)
+ bb_save_exit_state();
+ }
+ if (bb_curr->drop_through)
+ bb_transfer(bb_curr->end,
+ bb_list[i+1]->start, 1);
+ }
+ if (maxloops-- == 0) {
+ kdb_printf("\n\n%s maxloops reached\n",
+ __FUNCTION__);
+ bb_giveup = 1;
+ goto done;
+ }
+ } while (changed);
+done:
+ for (i = 0; i < bb_count; ++i) {
+ bb_curr = bb_list[i];
+ if (bb_curr->changed)
+ return 1; /* more to do, increase allow_missing */
+ }
+ return 0; /* all blocks done */
+}
+
+/* Assume that the current function is a pass through function that does not
+ * refer to its register parameters. Exclude known asmlinkage functions and
+ * assume the other functions actually use their registers.
+ */
+
+static void
+bb_assume_pass_through(void)
+{
+ static int first_time = 1;
+ if (strncmp(bb_func_name, "sys_", 4) == 0 ||
+ strncmp(bb_func_name, "compat_sys_", 11) == 0 ||
+ strcmp(bb_func_name, "schedule") == 0 ||
+ strcmp(bb_func_name, "do_softirq") == 0 ||
+ strcmp(bb_func_name, "printk") == 0 ||
+ strcmp(bb_func_name, "vprintk") == 0 ||
+ strcmp(bb_func_name, "preempt_schedule") == 0 ||
+ strcmp(bb_func_name, "start_kernel") == 0 ||
+ strcmp(bb_func_name, "csum_partial") == 0 ||
+ strcmp(bb_func_name, "csum_partial_copy_generic") == 0 ||
+ strcmp(bb_func_name, "math_state_restore") == 0 ||
+ strcmp(bb_func_name, "panic") == 0 ||
+ strcmp(bb_func_name, "kdb_printf") == 0 ||
+ strcmp(bb_func_name, "kdb_interrupt") == 0)
+ return;
+ if (bb_asmlinkage_arch())
+ return;
+ bb_reg_params = REGPARM;
+ if (first_time) {
+ kdb_printf(" %s has memory parameters but no register "
+ "parameters.\n Assuming it is a 'pass "
+ "through' function that does not refer to "
+ "its register\n parameters and setting %d "
+ "register parameters\n",
+ bb_func_name, REGPARM);
+ first_time = 0;
+ return;
+ }
+ kdb_printf(" Assuming %s is 'pass through' with %d register "
+ "parameters\n",
+ bb_func_name, REGPARM);
+}
+
+static void
+bb_pass2(void)
+{
+ int allow_missing;
+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
+ kdb_printf("%s: start\n", __FUNCTION__);
+
+ kdb_di.fprintf_func = bb_dis_pass2;
+ kdb_di.print_address_func = bb_printaddr_pass2;
+
+ bb_reg_state = debug_kmalloc(sizeof(*bb_reg_state), GFP_ATOMIC);
+ if (!bb_reg_state) {
+ kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
+ bb_giveup = 1;
+ return;
+ }
+ bb_list[0]->changed = 1;
+
+ /* If a block does not have all its input states available then it is
+ * possible for a register to initially appear to hold a known value,
+ * but when other inputs are available then it becomes a variable
+ * value. The initial false state of "known" can generate false values
+ * for other registers and can even make it look like stack locations
+ * are being changed.
+ *
+ * To avoid these false positives, only process blocks which have all
+ * their inputs defined. That gives a clean depth first traversal of
+ * the tree, except for loops. If there are any loops, then start
+ * processing blocks with one missing input, then two missing inputs
+ * etc.
+ *
+ * Absolute worst case is we have to iterate over all the jmp entries,
+ * each iteration allowing one more missing input. Any more loops than
+ * that is a bug. Watch out for the corner case of 0 jmp entries.
+ */
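+ /* Illustrative: a simple loop leaves its back edge with no state on
+ * the first sweep, so allow_missing must reach 1 before the loop
+ * body converges.
+ */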
+ for (allow_missing = 0; allow_missing <= bb_jmp_count; ++allow_missing) {
+ if (!bb_pass2_do_changed_blocks(allow_missing))
+ break;
+ if (bb_giveup)
+ break;
+ }
+ if (allow_missing > bb_jmp_count) {
+ kdb_printf("\n\n%s maxloops reached\n",
+ __FUNCTION__);
+ bb_giveup = 1;
+ return;
+ }
+
+ if (bb_memory_params && bb_reg_params)
+ bb_reg_params = REGPARM;
+ if (REGPARM &&
+ bb_memory_params &&
+ !bb_reg_params)
+ bb_assume_pass_through();
+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) {
+ kdb_printf("%s: end bb_reg_params %d bb_memory_params %d\n",
+ __FUNCTION__, bb_reg_params, bb_memory_params);
+ if (bb_exit_state) {
+ kdb_printf("%s: bb_exit_state at " kdb_bfd_vma_fmt0 "\n",
+ __FUNCTION__, bb_exit_addr);
+ bb_do_reg_state_print(bb_exit_state);
+ }
+ }
+}
+
+static void
+bb_cleanup(void)
+{
+ int i;
+ struct bb *bb;
+ struct bb_reg_state *state;
+ while (bb_count) {
+ bb = bb_list[0];
+ bb_delete(0);
+ }
+ debug_kfree(bb_list);
+ bb_list = NULL;
+ bb_count = bb_max = 0;
+ for (i = 0; i < bb_jmp_count; ++i) {
+ state = bb_jmp_list[i].state;
+ if (state && --state->ref_count == 0)
+ debug_kfree(state);
+ }
+ debug_kfree(bb_jmp_list);
+ bb_jmp_list = NULL;
+ bb_jmp_count = bb_jmp_max = 0;
+ debug_kfree(bb_reg_state);
+ bb_reg_state = NULL;
+ bb_reg_state_max = 0;
+ debug_kfree(bb_exit_state);
+ bb_exit_state = NULL;
+ bb_reg_params = bb_memory_params = 0;
+ bb_giveup = 0;
+}
+
+static int
+bb_spurious_global_label(const char *func_name)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(bb_spurious); ++i) {
+ if (strcmp(bb_spurious[i], func_name) == 0)
+ return 1;
+ }
+ return 0;
+}
+
+/* Given the current actual register contents plus the exit state deduced from
+ * a basic block analysis of the current function, rollback the actual register
+ * contents to the values they had on entry to this function.
+ */
+
+static void
+bb_actual_rollback(const struct kdb_activation_record *ar)
+{
+ int i, offset_address;
+ struct bb_memory_contains *c;
+ enum bb_reg_code reg;
+ unsigned long address, osp = 0;
+ struct bb_actual new[ARRAY_SIZE(bb_actual)];
+
+ if (!bb_exit_state) {
+ kdb_printf("%s: no bb_exit_state, cannot rollback\n",
+ __FUNCTION__);
+ bb_giveup = 1;
+ return;
+ }
+ memcpy(bb_reg_state, bb_exit_state, bb_reg_state_size(bb_exit_state));
+ memset(new, 0, sizeof(new));
+
+ /* The most important register for obtaining saved state is rsp so get
+ * its new value first. Prefer rsp if it is valid, then other
+ * registers. Saved values of rsp in memory are unusable without a
+ * register that points to memory.
+ */
+ if (!bb_actual_valid(BBRG_RSP)) {
+ kdb_printf("%s: no starting value for RSP, cannot rollback\n",
+ __FUNCTION__);
+ bb_giveup = 1;
+ return;
+ }
+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
+ kdb_printf("%s: rsp " kdb_bfd_vma_fmt0,
+ __FUNCTION__, bb_actual_value(BBRG_RSP));
+ i = BBRG_RSP;
+ if (!bb_is_osp_defined(i)) {
+ for (i = BBRG_RAX; i < BBRG_RAX + KDB_INT_REGISTERS; ++i) {
+ if (bb_is_osp_defined(i) && bb_actual_valid(i))
+ break;
+ }
+ }
+ if (bb_is_osp_defined(i) && bb_actual_valid(i)) {
+ osp = new[BBRG_RSP - BBRG_RAX].value =
+ bb_actual_value(i) - bb_reg_code_offset(i);
+ new[BBRG_RSP - BBRG_RAX].valid = 1;
+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
+ kdb_printf(" -> osp " kdb_bfd_vma_fmt0 "\n", osp);
+ } else {
+ bb_actual_set_valid(BBRG_RSP, 0);
+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
+ kdb_printf(" -> undefined\n");
+ kdb_printf("%s: no ending value for RSP, cannot rollback\n",
+ __FUNCTION__);
+ bb_giveup = 1;
+ return;
+ }
+
+ /* Now the other registers. First look at register values that have
+ * been copied to other registers.
+ */
+ for (i = BBRG_RAX; i < BBRG_RAX + KDB_INT_REGISTERS; ++i) {
+ reg = bb_reg_code_value(i);
+ if (bb_is_int_reg(reg)) {
+ new[reg - BBRG_RAX] = bb_actual[i - BBRG_RAX];
+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) {
+ kdb_printf("%s: %s is in %s ",
+ __FUNCTION__,
+ bbrg_name[reg],
+ bbrg_name[i]);
+ if (bb_actual_valid(i))
+ kdb_printf(" -> " kdb_bfd_vma_fmt0 "\n",
+ bb_actual_value(i));
+ else
+ kdb_printf("(invalid)\n");
+ }
+ }
+ }
+
+ /* Finally register values that have been saved on stack */
+ for (i = 0, c = bb_reg_state->memory;
+ i < bb_reg_state->mem_count;
+ ++i, ++c) {
+ offset_address = c->offset_address;
+ reg = c->value;
+ if (!bb_is_int_reg(reg))
+ continue;
+ address = osp + offset_address;
+ if (address < ar->stack.logical_start ||
+ address >= ar->stack.logical_end) {
+ new[reg - BBRG_RAX].value = 0;
+ new[reg - BBRG_RAX].valid = 0;
+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
+ kdb_printf("%s: %s -> undefined\n",
+ __FUNCTION__,
+ bbrg_name[reg]);
+ } else {
+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) {
+ kdb_printf("%s: %s -> *(osp",
+ __FUNCTION__,
+ bbrg_name[reg]);
+ KDB_DEBUG_BB_OFFSET_PRINTF(offset_address, "", " ");
+ kdb_printf(kdb_bfd_vma_fmt0, address);
+ }
+ new[reg - BBRG_RAX].value = *(bfd_vma *)address;
+ new[reg - BBRG_RAX].valid = 1;
+ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
+ kdb_printf(") = " kdb_bfd_vma_fmt0 "\n",
+ new[reg - BBRG_RAX].value);
+ }
+ }
+
+ memcpy(bb_actual, new, sizeof(bb_actual));
+}
+
+/* Return true if the current function is an interrupt handler */
+
+static bool
+bb_interrupt_handler(kdb_machreg_t rip)
+{
+ unsigned long disp8, disp32, target, addr = (unsigned long)rip;
+ unsigned char code[5];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bb_hardware_handlers); ++i)
+ if (strcmp(bb_func_name, bb_hardware_handlers[i]) == 0)
+ return 1;
+
+ /* Given the large number of interrupt handlers, it is easiest to look
+ * at the next instruction and see if it is a jmp to the common exit
+ * routines.
+ */
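+ /* 0xe9 is jmp rel32 (5 bytes) and 0xeb is jmp rel8 (2 bytes); in
+ * both cases the target is the address of the next instruction plus
+ * the signed displacement.
+ */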
+ if (kdb_getarea(code, addr) ||
+ kdb_getword(&disp32, addr+1, 4) ||
+ kdb_getword(&disp8, addr+1, 1))
+ return 0; /* not a valid code address */
+ if (code[0] == 0xe9) {
+ target = addr + (s32) disp32 + 5; /* jmp disp32 */
+ if (target == bb_ret_from_intr ||
+ target == bb_common_interrupt ||
+ target == bb_error_entry)
+ return 1;
+ }
+ if (code[0] == 0xeb) {
+ target = addr + (s8) disp8 + 2; /* jmp disp8 */
+ if (target == bb_ret_from_intr ||
+ target == bb_common_interrupt ||
+ target == bb_error_entry)
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Copy argument information that was deduced by the basic block analysis and
+ * rollback into the kdb stack activation record.
+ */
+
+static void
+bb_arguments(struct kdb_activation_record *ar)
+{
+ int i;
+ enum bb_reg_code reg;
+ kdb_machreg_t rsp;
+ ar->args = bb_reg_params + bb_memory_params;
+ bitmap_zero(ar->valid.bits, KDBA_MAXARGS);
+ for (i = 0; i < bb_reg_params; ++i) {
+ reg = bb_param_reg[i];
+ if (bb_actual_valid(reg)) {
+ ar->arg[i] = bb_actual_value(reg);
+ set_bit(i, ar->valid.bits);
+ }
+ }
+ if (!bb_actual_valid(BBRG_RSP))
+ return;
+ rsp = bb_actual_value(BBRG_RSP);
+ for (i = bb_reg_params; i < ar->args; ++i) {
+ rsp += KDB_WORD_SIZE;
+ if (kdb_getarea(ar->arg[i], rsp) == 0)
+ set_bit(i, ar->valid.bits);
+ }
+}
+
+/* Given an exit address from a function, decompose the entire function into
+ * basic blocks and determine the register state at the exit point.
+ */
+
+static void
+kdb_bb(unsigned long exit)
+{
+ kdb_symtab_t symtab;
+ if (!kdbnearsym(exit, &symtab)) {
+ kdb_printf("%s: address " kdb_bfd_vma_fmt0 " not recognised\n",
+ __FUNCTION__, exit);
+ bb_giveup = 1;
+ return;
+ }
+ bb_exit_addr = exit;
+ bb_mod_name = symtab.mod_name;
+ bb_func_name = symtab.sym_name;
+ bb_func_start = symtab.sym_start;
+ bb_func_end = symtab.sym_end;
+ /* Various global labels exist in the middle of assembler code and have
+ * a non-standard state. Ignore these labels and use the start of the
+ * previous label instead.
+ */
+ while (bb_spurious_global_label(symtab.sym_name)) {
+ if (!kdbnearsym(symtab.sym_start - 1, &symtab))
+ break;
+ bb_func_start = symtab.sym_start;
+ }
+ bb_mod_name = symtab.mod_name;
+ bb_func_name = symtab.sym_name;
+ bb_func_start = symtab.sym_start;
+ /* Ignore spurious labels past this point and use the next non-spurious
+ * label as the end point.
+ */
+ if (kdbnearsym(bb_func_end, &symtab)) {
+ while (bb_spurious_global_label(symtab.sym_name)) {
+ bb_func_end = symtab.sym_end;
+ if (!kdbnearsym(symtab.sym_end + 1, &symtab))
+ break;
+ }
+ }
+ bb_pass1();
+ if (!bb_giveup)
+ bb_pass2();
+ if (bb_giveup)
+ kdb_printf("%s: " kdb_bfd_vma_fmt0
+ " [%s]%s failed at " kdb_bfd_vma_fmt0 "\n\n",
+ __FUNCTION__, exit,
+ bb_mod_name, bb_func_name, bb_curr_addr);
+}
+
+static int
+kdb_bb1(int argc, const char **argv)
+{
+ int diag, nextarg = 1;
+ kdb_machreg_t addr;
+ unsigned long offset;
+
+ bb_cleanup(); /* in case previous command was interrupted */
+ kdba_id_init(&kdb_di);
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
+ if (diag)
+ return diag;
+ if (!addr)
+ return KDB_BADADDR;
+ kdb_save_flags();
+ kdb_flags |= KDB_DEBUG_FLAG_BB << KDB_DEBUG_FLAG_SHIFT;
+ kdb_bb(addr);
+ bb_cleanup();
+ kdb_restore_flags();
+ kdbnearsym_cleanup();
+ return 0;
+}
+
+/* Run a basic block analysis on every function in the base kernel. Used as a
+ * global sanity check to find errors in the basic block code.
+ */
+
+static int
+kdb_bb_all(int argc, const char **argv)
+{
+ loff_t pos = 0;
+ const char *symname;
+ unsigned long addr;
+ int i, max_errors = 20;
+ struct bb_name_state *r;
+ kdb_printf("%s: build variables:"
+ " CCVERSION \"" __stringify(CCVERSION) "\""
+#ifdef CONFIG_X86_64
+ " CONFIG_X86_64"
+#endif
+#ifdef CONFIG_4KSTACKS
+ " CONFIG_4KSTACKS"
+#endif
+#ifdef CONFIG_PREEMPT
+ " CONFIG_PREEMPT"
+#endif
+#ifdef CONFIG_VM86
+ " CONFIG_VM86"
+#endif
+#ifdef CONFIG_FRAME_POINTER
+ " CONFIG_FRAME_POINTER"
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+ " CONFIG_TRACE_IRQFLAGS"
+#endif
+#ifdef CONFIG_HIBERNATION
+ " CONFIG_HIBERNATION"
+#endif
+#ifdef CONFIG_KPROBES
+ " CONFIG_KPROBES"
+#endif
+#ifdef CONFIG_KEXEC
+ " CONFIG_KEXEC"
+#endif
+#ifdef CONFIG_MATH_EMULATION
+ " CONFIG_MATH_EMULATION"
+#endif
+#ifdef CONFIG_XEN
+ " CONFIG_XEN"
+#endif
+#ifdef CONFIG_DEBUG_INFO
+ " CONFIG_DEBUG_INFO"
+#endif
+#ifdef NO_SIBLINGS
+ " NO_SIBLINGS"
+#endif
+ " REGPARM=" __stringify(REGPARM)
+ "\n\n", __FUNCTION__);
+ for (i = 0, r = bb_special_cases;
+ i < ARRAY_SIZE(bb_special_cases);
+ ++i, ++r) {
+ if (!r->address)
+ kdb_printf("%s: cannot find special_case name %s\n",
+ __FUNCTION__, r->name);
+ }
+ for (i = 0; i < ARRAY_SIZE(bb_spurious); ++i) {
+ if (!kallsyms_lookup_name(bb_spurious[i]))
+ kdb_printf("%s: cannot find spurious label %s\n",
+ __FUNCTION__, bb_spurious[i]);
+ }
+ while ((symname = kdb_walk_kallsyms(&pos))) {
+ if (strcmp(symname, "_stext") == 0 ||
+ strcmp(symname, "stext") == 0)
+ break;
+ }
+ if (!symname) {
+ kdb_printf("%s: cannot find _stext\n", __FUNCTION__);
+ return 0;
+ }
+ kdba_id_init(&kdb_di);
+ i = 0;
+ while ((symname = kdb_walk_kallsyms(&pos))) {
+ if (strcmp(symname, "_etext") == 0)
+ break;
+ if (i++ % 100 == 0)
+ kdb_printf(".");
+ /* x86_64 has some 16 bit functions that appear between stext
+ * and _etext. Skip them.
+ */
+ if (strcmp(symname, "verify_cpu") == 0 ||
+ strcmp(symname, "verify_cpu_noamd") == 0 ||
+ strcmp(symname, "verify_cpu_sse_test") == 0 ||
+ strcmp(symname, "verify_cpu_no_longmode") == 0 ||
+ strcmp(symname, "verify_cpu_sse_ok") == 0 ||
+ strcmp(symname, "mode_seta") == 0 ||
+ strcmp(symname, "bad_address") == 0 ||
+ strcmp(symname, "wakeup_code") == 0 ||
+ strcmp(symname, "wakeup_code_start") == 0 ||
+ strcmp(symname, "wakeup_start") == 0 ||
+ strcmp(symname, "wakeup_32_vector") == 0 ||
+ strcmp(symname, "wakeup_32") == 0 ||
+ strcmp(symname, "wakeup_long64_vector") == 0 ||
+ strcmp(symname, "wakeup_long64") == 0 ||
+ strcmp(symname, "gdta") == 0 ||
+ strcmp(symname, "idt_48a") == 0 ||
+ strcmp(symname, "gdt_48a") == 0 ||
+ strcmp(symname, "bogus_real_magic") == 0 ||
+ strcmp(symname, "bogus_64_magic") == 0 ||
+ strcmp(symname, "no_longmode") == 0 ||
+ strcmp(symname, "mode_set") == 0 ||
+ strcmp(symname, "mode_seta") == 0 ||
+ strcmp(symname, "setbada") == 0 ||
+ strcmp(symname, "check_vesa") == 0 ||
+ strcmp(symname, "check_vesaa") == 0 ||
+ strcmp(symname, "_setbada") == 0 ||
+ strcmp(symname, "wakeup_stack_begin") == 0 ||
+ strcmp(symname, "wakeup_stack") == 0 ||
+ strcmp(symname, "wakeup_level4_pgt") == 0 ||
+ strcmp(symname, "acpi_copy_wakeup_routine") == 0 ||
+ strcmp(symname, "wakeup_end") == 0 ||
+ strcmp(symname, "do_suspend_lowlevel_s4bios") == 0 ||
+ strcmp(symname, "do_suspend_lowlevel") == 0 ||
+ strcmp(symname, "wakeup_pmode_return") == 0 ||
+ strcmp(symname, "restore_registers") == 0)
+ continue;
+ /* __kprobes_text_end contains branches to the middle of code,
+ * with undefined states.
+ */
+ if (strcmp(symname, "__kprobes_text_end") == 0)
+ continue;
+ /* Data in the middle of the text segment :( */
+ if (strcmp(symname, "level2_kernel_pgt") == 0 ||
+ strcmp(symname, "level3_kernel_pgt") == 0)
+ continue;
+ if (bb_spurious_global_label(symname))
+ continue;
+ if ((addr = kallsyms_lookup_name(symname)) == 0)
+ continue;
+ // kdb_printf("BB " kdb_bfd_vma_fmt0 " %s\n", addr, symname);
+ bb_cleanup(); /* in case previous command was interrupted */
+ kdbnearsym_cleanup();
+ kdb_bb(addr);
+ touch_nmi_watchdog();
+ if (bb_giveup) {
+ if (max_errors-- == 0) {
+ kdb_printf("%s: max_errors reached, giving up\n",
+ __FUNCTION__);
+ break;
+ } else {
+ bb_giveup = 0;
+ }
+ }
+ }
+ kdb_printf("\n");
+ bb_cleanup();
+ kdbnearsym_cleanup();
+ return 0;
+}
+
+/*
+ *=============================================================================
+ *
+ * Everything above this line is doing basic block analysis, function by
+ * function. Everything below this line uses the basic block data to do a
+ * complete backtrace over all functions that are used by a process.
+ *
+ *=============================================================================
+ */
+
+
+/*============================================================================*/
+/* */
+/* Most of the backtrace code and data is common to x86_64 and i386. This */
+/* large ifdef contains all of the differences between the two architectures. */
+/* */
+/* Make sure you update the correct section of this ifdef. */
+/* */
+/*============================================================================*/
+#define XCS "cs"
+#define RSP "sp"
+#define RIP "ip"
+#define ARCH_RSP sp
+#define ARCH_RIP ip
+
+#ifdef CONFIG_X86_64
+
+#define ARCH_NORMAL_PADDING (16 * 8)
+
+/* x86_64 has multiple alternate stacks, with different sizes and different
+ * offsets to get the link from one stack to the next. All of the stacks are
+ * in the per_cpu area: either in the orig_ist or irq_stack_ptr. Debug events
+ * can even have multiple nested stacks within the single physical stack;
+ * each nested stack has its own link, and some of those links are wrong.
+ *
+ * Consistent it's not!
+ *
+ * Do not assume that these stacks are aligned on their size.
+ */
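+/* A minimal sketch of the mapping (using the per_cpu symbols named above):
+ * an arbitrary address is tested against each alternate stack roughly as
+ *
+ *	total_end   = per_cpu(orig_ist, cpu).ist[sd];	(or irq_stack_ptr)
+ *	total_start = total_end - sdp->total_size;
+ *	if (addr >= total_start && addr < total_end)
+ *		... addr lies on this cpu's alternate stack ...
+ *
+ * kdba_get_stack_info_alternate() below does exactly this for each entry
+ * of its stack_data[] table.
+ */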
+#define INTERRUPT_STACK (N_EXCEPTION_STACKS + 1)
+void
+kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu,
+ struct kdb_activation_record *ar)
+{
+ static struct {
+ const char *id;
+ unsigned int total_size;
+ unsigned int nested_size;
+ unsigned int next;
+ } *sdp, stack_data[] = {
+ [STACKFAULT_STACK - 1] = { "stackfault", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
+ [DOUBLEFAULT_STACK - 1] = { "doublefault", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
+ [NMI_STACK - 1] = { "nmi", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
+ [DEBUG_STACK - 1] = { "debug", DEBUG_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
+ [MCE_STACK - 1] = { "machine check", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
+ [INTERRUPT_STACK - 1] = { "interrupt", IRQ_STACK_SIZE, IRQ_STACK_SIZE, IRQ_STACK_SIZE - sizeof(void *) },
+ };
+ unsigned long total_start = 0, total_size, total_end;
+ int sd, found = 0;
+ extern unsigned long kdba_orig_ist(int, int);
+
+ for (sd = 0, sdp = stack_data;
+ sd < ARRAY_SIZE(stack_data);
+ ++sd, ++sdp) {
+ total_size = sdp->total_size;
+ if (!total_size)
+ continue; /* in case stack_data[] has any holes */
+ if (cpu < 0) {
+ /* Arbitrary address which can be on any cpu, see if it
+ * falls within any of the alternate stacks
+ */
+ int c;
+ for_each_online_cpu(c) {
+ if (sd == INTERRUPT_STACK - 1)
+ total_end = (unsigned long)per_cpu(irq_stack_ptr, c);
+ else
+ total_end = per_cpu(orig_ist, c).ist[sd];
+ total_start = total_end - total_size;
+ if (addr >= total_start && addr < total_end) {
+ found = 1;
+ cpu = c;
+ break;
+ }
+ }
+ if (!found)
+ continue;
+ }
+ /* Only check the supplied or found cpu */
+ if (sd == INTERRUPT_STACK - 1)
+ total_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
+ else
+ total_end = per_cpu(orig_ist, cpu).ist[sd];
+ total_start = total_end - total_size;
+ if (addr >= total_start && addr < total_end) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ return;
+ /* find which nested stack the address is in */
+ while (addr > total_start + sdp->nested_size)
+ total_start += sdp->nested_size;
+ ar->stack.physical_start = total_start;
+ ar->stack.physical_end = total_start + sdp->nested_size;
+ ar->stack.logical_start = total_start;
+ ar->stack.logical_end = total_start + sdp->next;
+ ar->stack.next = *(unsigned long *)ar->stack.logical_end;
+ ar->stack.id = sdp->id;
+
+ /* Nasty: when switching to the interrupt stack, the stack state of the
+ * caller is split over two stacks, the original stack and the
+ * interrupt stack. One word (the previous frame pointer) is stored on
+ * the interrupt stack, the rest of the interrupt data is in the old
+ * frame. To make the interrupted stack state look as though it is
+ * contiguous, copy the missing word from the interrupt stack to the
+ * original stack and adjust the new stack pointer accordingly.
+ */
+
+ if (sd == INTERRUPT_STACK - 1) {
+ *(unsigned long *)(ar->stack.next - KDB_WORD_SIZE) =
+ ar->stack.next;
+ ar->stack.next -= KDB_WORD_SIZE;
+ }
+}
+
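+/* Illustrative only: once ar->stack has been filled in, the backtrace code
+ * hops from stack to stack by following the saved link word, roughly
+ *
+ *	while (ar.stack.physical_start) {
+ *		... scan [logical_start, logical_end) ...
+ *		rsp = ar.stack.next;
+ *		kdba_get_stack_info(rsp, -1, &ar, NULL);
+ *	}
+ *
+ * see kdba_bt_new_stack() and the main loop of kdba_bt_stack() below.
+ */
+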
+/* rip is not in the thread struct for x86_64. We know that the stack value
+ * was saved in schedule near the label thread_return. Setting rip to
+ * thread_return lets the stack trace find that we are in schedule and
+ * correctly decode its prologue.
+ */
+
+static kdb_machreg_t
+kdba_bt_stack_rip(const struct task_struct *p)
+{
+ return bb_thread_return;
+}
+
+#else /* !CONFIG_X86_64 */
+
+#define ARCH_NORMAL_PADDING (19 * 4)
+
+#ifdef CONFIG_4KSTACKS
+static struct thread_info **kdba_hardirq_ctx, **kdba_softirq_ctx;
+#endif /* CONFIG_4KSTACKS */
+
+/* On a 4K stack kernel, hardirq_ctx and softirq_ctx are [NR_CPUS] arrays. The
+ * first element of each per-cpu stack is a struct thread_info.
+ */
+void
+kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu,
+ struct kdb_activation_record *ar)
+{
+#ifdef CONFIG_4KSTACKS
+ struct thread_info *tinfo;
+ tinfo = (struct thread_info *)(addr & -THREAD_SIZE);
+ if (cpu < 0) {
+ /* Arbitrary address, see if it falls within any of the irq
+ * stacks
+ */
+ int found = 0;
+ for_each_online_cpu(cpu) {
+ if (tinfo == kdba_hardirq_ctx[cpu] ||
+ tinfo == kdba_softirq_ctx[cpu]) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ return;
+ }
+ if (tinfo == kdba_hardirq_ctx[cpu] ||
+ tinfo == kdba_softirq_ctx[cpu]) {
+ ar->stack.physical_start = (kdb_machreg_t)tinfo;
+ ar->stack.physical_end = ar->stack.physical_start + THREAD_SIZE;
+ ar->stack.logical_start = ar->stack.physical_start +
+ sizeof(struct thread_info);
+ ar->stack.logical_end = ar->stack.physical_end;
+ ar->stack.next = tinfo->previous_esp;
+ if (tinfo == kdba_hardirq_ctx[cpu])
+ ar->stack.id = "hardirq_ctx";
+ else
+ ar->stack.id = "softirq_ctx";
+ }
+#endif /* CONFIG_4KSTACKS */
+}
+
+/* rip is in the thread struct for i386 */
+
+static kdb_machreg_t
+kdba_bt_stack_rip(const struct task_struct *p)
+{
+ return p->thread.ip;
+}
+
+#endif /* CONFIG_X86_64 */
+
+/* Given an address which claims to be on a stack, an optional cpu number and
+ * an optional task address, get information about the stack.
+ *
+ * t == NULL, cpu < 0 indicates an arbitrary stack address with no associated
+ * struct task, the address can be in an alternate stack or any task's normal
+ * stack.
+ *
+ * t != NULL, cpu >= 0 indicates a running task, the address can be in an
+ * alternate stack or that task's normal stack.
+ *
+ * t != NULL, cpu < 0 indicates a blocked task, the address can only be in that
+ * task's normal stack.
+ *
+ * t == NULL, cpu >= 0 is not a valid combination.
+ */
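+/* Illustrative calls matching the three valid combinations above; these
+ * mirror the call sites in kdba_bt_stack() below:
+ *
+ *	kdba_get_stack_info(addr, -1, &ar, NULL);		arbitrary address
+ *	kdba_get_stack_info(rsp, kdb_process_cpu(p), &ar, p);	running task
+ *	kdba_get_stack_info(p->thread.sp, -1, &ar, p);		blocked task
+ */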
+
+static void
+kdba_get_stack_info(kdb_machreg_t rsp, int cpu,
+ struct kdb_activation_record *ar,
+ const struct task_struct *t)
+{
+ struct thread_info *tinfo;
+ struct task_struct *g, *p;
+ memset(&ar->stack, 0, sizeof(ar->stack));
+ if (KDB_DEBUG(ARA))
+ kdb_printf("%s: " RSP "=0x%lx cpu=%d task=%p\n",
+ __FUNCTION__, rsp, cpu, t);
+ if (t == NULL || cpu >= 0) {
+ kdba_get_stack_info_alternate(rsp, cpu, ar);
+ if (ar->stack.logical_start)
+ goto out;
+ }
+ rsp &= -THREAD_SIZE;
+ tinfo = (struct thread_info *)rsp;
+ if (t == NULL) {
+ /* Arbitrary stack address without an associated task, see if
+ * it falls within any normal process stack, including the idle
+ * tasks.
+ */
+ kdb_do_each_thread(g, p) {
+ if (tinfo == task_thread_info(p)) {
+ t = p;
+ goto found;
+ }
+ } kdb_while_each_thread(g, p);
+ for_each_online_cpu(cpu) {
+ p = idle_task(cpu);
+ if (tinfo == task_thread_info(p)) {
+ t = p;
+ goto found;
+ }
+ }
+ found:
+ if (KDB_DEBUG(ARA))
+ kdb_printf("%s: found task %p\n", __FUNCTION__, t);
+ } else if (cpu >= 0) {
+ /* running task */
+ struct kdb_running_process *krp = kdb_running_process + cpu;
+ if (krp->p != t || tinfo != task_thread_info(t))
+ t = NULL;
+ if (KDB_DEBUG(ARA))
+ kdb_printf("%s: running task %p\n", __FUNCTION__, t);
+ } else {
+ /* blocked task */
+ if (tinfo != task_thread_info(t))
+ t = NULL;
+ if (KDB_DEBUG(ARA))
+ kdb_printf("%s: blocked task %p\n", __FUNCTION__, t);
+ }
+ if (t) {
+ ar->stack.physical_start = rsp;
+ ar->stack.physical_end = rsp + THREAD_SIZE;
+ ar->stack.logical_start = rsp + sizeof(struct thread_info);
+ ar->stack.logical_end = ar->stack.physical_end - ARCH_NORMAL_PADDING;
+ ar->stack.next = 0;
+ ar->stack.id = "normal";
+ }
+out:
+ if (ar->stack.physical_start && KDB_DEBUG(ARA)) {
+ kdb_printf("%s: ar->stack\n", __FUNCTION__);
+ kdb_printf(" physical_start=0x%lx\n", ar->stack.physical_start);
+ kdb_printf(" physical_end=0x%lx\n", ar->stack.physical_end);
+ kdb_printf(" logical_start=0x%lx\n", ar->stack.logical_start);
+ kdb_printf(" logical_end=0x%lx\n", ar->stack.logical_end);
+ kdb_printf(" next=0x%lx\n", ar->stack.next);
+ kdb_printf(" id=%s\n", ar->stack.id);
+ kdb_printf(" set MDCOUNT %ld\n",
+ (ar->stack.physical_end - ar->stack.physical_start) /
+ KDB_WORD_SIZE);
+ kdb_printf(" mds " kdb_machreg_fmt0 "\n",
+ ar->stack.physical_start);
+ }
+}
+
+static void
+bt_print_one(kdb_machreg_t rip, kdb_machreg_t rsp,
+ const struct kdb_activation_record *ar,
+ const kdb_symtab_t *symtab, int argcount)
+{
+ int btsymarg = 0;
+ int nosect = 0;
+
+ kdbgetintenv("BTSYMARG", &btsymarg);
+ kdbgetintenv("NOSECT", &nosect);
+
+ kdb_printf(kdb_machreg_fmt0, rsp);
+ kdb_symbol_print(rip, symtab,
+ KDB_SP_SPACEB|KDB_SP_VALUE);
+ if (argcount && ar->args) {
+ int i, argc = ar->args;
+ kdb_printf(" (");
+ if (argc > argcount)
+ argc = argcount;
+ for (i = 0; i < argc; i++) {
+ if (i)
+ kdb_printf(", ");
+ if (test_bit(i, ar->valid.bits))
+ kdb_printf("0x%lx", ar->arg[i]);
+ else
+ kdb_printf("invalid");
+ }
+ kdb_printf(")");
+ }
+ kdb_printf("\n");
+ if (symtab->sym_name) {
+ if (!nosect) {
+ kdb_printf(" %s",
+ symtab->mod_name);
+ if (symtab->sec_name && symtab->sec_start)
+ kdb_printf(" 0x%lx 0x%lx",
+ symtab->sec_start, symtab->sec_end);
+ kdb_printf(" 0x%lx 0x%lx\n",
+ symtab->sym_start, symtab->sym_end);
+ }
+ }
+ if (argcount && ar->args && btsymarg) {
+ int i, argc = ar->args;
+ kdb_symtab_t arg_symtab;
+ for (i = 0; i < argc; i++) {
+ kdb_machreg_t arg = ar->arg[i];
+ if (test_bit(i, ar->valid.bits) &&
+ kdbnearsym(arg, &arg_symtab)) {
+ kdb_printf(" ARG %2d ", i);
+ kdb_symbol_print(arg, &arg_symtab,
+ KDB_SP_DEFAULT|KDB_SP_NEWLINE);
+ }
+ }
+ }
+}
+
+static void
+kdba_bt_new_stack(struct kdb_activation_record *ar, kdb_machreg_t *rsp,
+ int *count, int *suppress)
+{
+ /* Nasty: save_args builds a partial pt_regs, with r15 through
+ * rbx not being filled in. It passes struct pt_regs* to do_IRQ (in
+ * rdi) but the stack pointer is not adjusted to account for r15
+ * through rbx. This has two effects :-
+ *
+ * (1) struct pt_regs on an external interrupt actually overlaps with
+ * the local stack area used by do_IRQ. Not only are r15-rbx
+ * undefined, the area that claims to hold their values can even
+ * change as the irq is processed.
+ *
+ * (2) The back stack pointer saved for the new frame is not pointing
+ * at pt_regs, it is pointing at rbx within the pt_regs passed to
+ * do_IRQ.
+ *
+ * There is nothing that I can do about (1) but I have to fix (2)
+ * because kdb backtrace looks for the "start" address of pt_regs as it
+ * walks back through the stacks. When switching from the interrupt
+ * stack to another stack, we have to assume that pt_regs has been
+	 * seen and turn off backtrace suppression.
+ */
+ int probable_pt_regs = strcmp(ar->stack.id, "interrupt") == 0;
+ *rsp = ar->stack.next;
+ if (KDB_DEBUG(ARA))
+ kdb_printf("new " RSP "=" kdb_machreg_fmt0 "\n", *rsp);
+ bb_actual_set_value(BBRG_RSP, *rsp);
+ kdba_get_stack_info(*rsp, -1, ar, NULL);
+ if (!ar->stack.physical_start) {
+ kdb_printf("+++ Cannot resolve next stack\n");
+ } else if (!*suppress) {
+ kdb_printf(" ======================= <%s>\n",
+ ar->stack.id);
+ ++*count;
+ }
+ if (probable_pt_regs)
+ *suppress = 0;
+}
+
+/*
+ * kdba_bt_stack
+ *
+ * Inputs:
+ * addr Address provided to 'bt' command, if any.
+ * argcount
+ * p Pointer to task for 'btp' command.
+ * Outputs:
+ * None.
+ * Returns:
+ * zero for success, a kdb diagnostic if error
+ * Locking:
+ * none.
+ * Remarks:
+ * Ultimately all the bt* commands come through this routine. If
+ * old_style is 0 then it uses the basic block analysis to get an accurate
+ * backtrace with arguments, otherwise it falls back to the old method of
+ * printing anything on stack that looks like a kernel address.
+ *
+ * Allowing for the stack data pushed by the hardware is tricky. We
+ * deduce the presence of hardware pushed data by looking for interrupt
+ * handlers, either by name or by the code that they contain. This
+ * information must be applied to the next function up the stack, because
+ * the hardware data is above the saved rip for the interrupted (next)
+ * function.
+ *
+ * To make things worse, the amount of data pushed is arch specific and
+ * may depend on the rsp for the next function, not the current function.
+ * The number of bytes pushed by hardware cannot be calculated until we
+ * are actually processing the stack for the interrupted function and have
+ * its rsp.
+ *
+ * It is also possible for an interrupt to occur in user space and for the
+ * interrupt handler to also be interrupted. Check the code selector
+ * whenever the previous function is an interrupt handler and stop
+ * backtracing if the interrupt was not in kernel space.
+ */
+
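+/* For reference, the "data pushed by the hardware" is the interrupt frame:
+ * on x86_64 the cpu always pushes ss, rsp, rflags, cs and rip (plus an
+ * error code for some exceptions); on i386 only flags, cs and ip are
+ * pushed unless the interrupt came from a lower privilege level.
+ * bb_hardware_pushed_arch() is expected to account for this when the
+ * previous function was an interrupt handler.
+ */
+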
+static int
+kdba_bt_stack(kdb_machreg_t addr, int argcount, const struct task_struct *p,
+ int old_style)
+{
+ struct kdb_activation_record ar;
+ kdb_machreg_t rip = 0, rsp = 0, prev_rsp, cs;
+ kdb_symtab_t symtab;
+ int rip_at_rsp = 0, count = 0, btsp = 0, suppress,
+ interrupt_handler = 0, prev_interrupt_handler = 0, hardware_pushed,
+ prev_noret = 0;
+ struct pt_regs *regs = NULL;
+
+ kdbgetintenv("BTSP", &btsp);
+ suppress = !btsp;
+ memset(&ar, 0, sizeof(ar));
+ if (old_style)
+ kdb_printf("Using old style backtrace, unreliable with no arguments\n");
+
+ /*
+ * The caller may have supplied an address at which the stack traceback
+ * operation should begin. This address is assumed by this code to
+ * point to a return address on the stack to be traced back.
+ *
+ * Warning: type in the wrong address and you will get garbage in the
+ * backtrace.
+ */
+ if (addr) {
+ rsp = addr;
+ kdb_getword(&rip, rsp, sizeof(rip));
+ rip_at_rsp = 1;
+ suppress = 0;
+ kdba_get_stack_info(rsp, -1, &ar, NULL);
+ } else {
+ if (task_curr(p)) {
+ struct kdb_running_process *krp =
+ kdb_running_process + task_cpu(p);
+ kdb_machreg_t cs;
+ regs = krp->regs;
+ if (krp->seqno &&
+ krp->p == p &&
+ krp->seqno >= kdb_seqno - 1 &&
+ !KDB_NULL_REGS(regs)) {
+ /* valid saved state, continue processing */
+ } else {
+ kdb_printf
+ ("Process did not save state, cannot backtrace\n");
+ kdb_ps1(p);
+ return 0;
+ }
+ kdba_getregcontents(XCS, regs, &cs);
+ if ((cs & 0xffff) != __KERNEL_CS) {
+ kdb_printf("Stack is not in kernel space, backtrace not available\n");
+ return 0;
+ }
+ rip = krp->arch.ARCH_RIP;
+ rsp = krp->arch.ARCH_RSP;
+ kdba_get_stack_info(rsp, kdb_process_cpu(p), &ar, p);
+ } else {
+ /* Not on cpu, assume blocked. Blocked tasks do not
+ * have pt_regs. p->thread contains some data, alas
+ * what it contains differs between i386 and x86_64.
+ */
+ rip = kdba_bt_stack_rip(p);
+ rsp = p->thread.sp;
+ suppress = 0;
+ kdba_get_stack_info(rsp, -1, &ar, p);
+ }
+ }
+ if (!ar.stack.physical_start) {
+ kdb_printf(RSP "=0x%lx is not in a valid kernel stack, backtrace not available\n",
+ rsp);
+ return 0;
+ }
+ memset(&bb_actual, 0, sizeof(bb_actual));
+ bb_actual_set_value(BBRG_RSP, rsp);
+ bb_actual_set_valid(BBRG_RSP, 1);
+
+ kdb_printf(RSP "%*s" RIP "%*sFunction (args)\n",
+ 2*KDB_WORD_SIZE, " ",
+ 2*KDB_WORD_SIZE, " ");
+ if (ar.stack.next && !suppress)
+ kdb_printf(" ======================= <%s>\n",
+ ar.stack.id);
+
+ bb_cleanup();
+ /* Run through all the stacks */
+ while (ar.stack.physical_start) {
+ if (rip_at_rsp) {
+ rip = *(kdb_machreg_t *)rsp;
+ /* I wish that gcc was fixed to include a nop
+ * instruction after ATTRIB_NORET functions. The lack
+ * of a nop means that the return address points to the
+ * start of next function, so fudge it to point to one
+ * byte previous.
+ *
+ * No, we cannot just decrement all rip values.
+ * Sometimes an rip legally points to the start of a
+ * function, e.g. interrupted code or hand crafted
+ * assembler.
+ */
+ if (prev_noret) {
+ kdbnearsym(rip, &symtab);
+ if (rip == symtab.sym_start) {
+ --rip;
+ if (KDB_DEBUG(ARA))
+ kdb_printf("\tprev_noret, " RIP
+ "=0x%lx\n", rip);
+ }
+ }
+ }
+ kdbnearsym(rip, &symtab);
+ if (old_style) {
+ if (__kernel_text_address(rip) && !suppress) {
+ bt_print_one(rip, rsp, &ar, &symtab, 0);
+ ++count;
+ }
+ if (rsp == (unsigned long)regs) {
+ if (ar.stack.next && suppress)
+ kdb_printf(" ======================= <%s>\n",
+ ar.stack.id);
+ ++count;
+ suppress = 0;
+ }
+ rsp += sizeof(rip);
+ rip_at_rsp = 1;
+ if (rsp >= ar.stack.logical_end) {
+ if (!ar.stack.next)
+ break;
+ kdba_bt_new_stack(&ar, &rsp, &count, &suppress);
+ rip_at_rsp = 0;
+ continue;
+ }
+ } else {
+ /* Start each analysis with no dynamic data from the
+ * previous kdb_bb() run.
+ */
+ bb_cleanup();
+ kdb_bb(rip);
+ if (bb_giveup)
+ break;
+ prev_interrupt_handler = interrupt_handler;
+ interrupt_handler = bb_interrupt_handler(rip);
+ prev_rsp = rsp;
+ if (rip_at_rsp) {
+ if (prev_interrupt_handler) {
+ cs = *((kdb_machreg_t *)rsp + 1) & 0xffff;
+ hardware_pushed =
+ bb_hardware_pushed_arch(rsp, &ar);
+ } else {
+ cs = __KERNEL_CS;
+ hardware_pushed = 0;
+ }
+ rsp += sizeof(rip) + hardware_pushed;
+ if (KDB_DEBUG(ARA))
+ kdb_printf("%s: " RSP " "
+ kdb_machreg_fmt0
+ " -> " kdb_machreg_fmt0
+ " hardware_pushed %d"
+ " prev_interrupt_handler %d"
+ " cs 0x%lx\n",
+ __FUNCTION__,
+ prev_rsp,
+ rsp,
+ hardware_pushed,
+ prev_interrupt_handler,
+ cs);
+ if (rsp >= ar.stack.logical_end &&
+ ar.stack.next) {
+ kdba_bt_new_stack(&ar, &rsp, &count,
+ &suppress);
+ rip_at_rsp = 0;
+ continue;
+ }
+ bb_actual_set_value(BBRG_RSP, rsp);
+ } else {
+ cs = __KERNEL_CS;
+ }
+ rip_at_rsp = 1;
+ bb_actual_rollback(&ar);
+ if (bb_giveup)
+ break;
+ if (bb_actual_value(BBRG_RSP) < rsp) {
+ kdb_printf("%s: " RSP " is going backwards, "
+ kdb_machreg_fmt0 " -> "
+ kdb_machreg_fmt0 "\n",
+ __FUNCTION__,
+ rsp,
+ bb_actual_value(BBRG_RSP));
+ bb_giveup = 1;
+ break;
+ }
+ bb_arguments(&ar);
+ if (!suppress) {
+ bt_print_one(rip, prev_rsp, &ar, &symtab, argcount);
+ ++count;
+ }
+ /* Functions that terminate the backtrace */
+ if (strcmp(bb_func_name, "cpu_idle") == 0 ||
+ strcmp(bb_func_name, "child_rip") == 0)
+ break;
+ if (rsp >= ar.stack.logical_end &&
+ !ar.stack.next)
+ break;
+ if (rsp <= (unsigned long)regs &&
+ bb_actual_value(BBRG_RSP) > (unsigned long)regs) {
+ if (ar.stack.next && suppress)
+ kdb_printf(" ======================= <%s>\n",
+ ar.stack.id);
+ ++count;
+ suppress = 0;
+ }
+ if (cs != __KERNEL_CS) {
+ kdb_printf("Reached user space\n");
+ break;
+ }
+ rsp = bb_actual_value(BBRG_RSP);
+ }
+ prev_noret = bb_noret(bb_func_name);
+ if (count > 200)
+ break;
+ }
+ if (bb_giveup)
+ return 1;
+ bb_cleanup();
+ kdbnearsym_cleanup();
+
+ if (count > 200) {
+ kdb_printf("bt truncated, count limit reached\n");
+ return 1;
+ } else if (suppress) {
+ kdb_printf
+ ("bt did not find pt_regs - no trace produced. Suggest 'set BTSP 1'\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * kdba_bt_address
+ *
+ * Do a backtrace starting at a specified stack address. Use this if the
+ * heuristics get the stack decode wrong.
+ *
+ * Inputs:
+ * addr Address provided to 'bt' command.
+ * argcount
+ * Outputs:
+ * None.
+ * Returns:
+ * zero for success, a kdb diagnostic if error
+ * Locking:
+ * none.
+ * Remarks:
+ * mds %rsp comes in handy when examining the stack to do a manual
+ * traceback.
+ */
+
+int kdba_bt_address(kdb_machreg_t addr, int argcount)
+{
+ int ret;
+ kdba_id_init(&kdb_di); /* kdb_bb needs this done once */
+ ret = kdba_bt_stack(addr, argcount, NULL, 0);
+ if (ret == 1)
+ ret = kdba_bt_stack(addr, argcount, NULL, 1);
+ return ret;
+}
+
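+/* Illustrative kdb session (the address is a made-up example):
+ *
+ *	kdb> mds %rsp			examine the raw stack words
+ *	kdb> bt 0xffff880012345678	restart the trace at a chosen word
+ *
+ * the address handed to 'bt' is interpreted as a saved return address on
+ * the stack, as described in the remarks for kdba_bt_stack() above.
+ */
+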
+/*
+ * kdba_bt_process
+ *
+ * Do a backtrace for a specified process.
+ *
+ * Inputs:
+ * p Struct task pointer extracted by 'bt' command.
+ * argcount
+ * Outputs:
+ * None.
+ * Returns:
+ * zero for success, a kdb diagnostic if error
+ * Locking:
+ * none.
+ */
+
+int kdba_bt_process(const struct task_struct *p, int argcount)
+{
+ int ret;
+ kdba_id_init(&kdb_di); /* kdb_bb needs this done once */
+ ret = kdba_bt_stack(0, argcount, p, 0);
+ if (ret == 1)
+ ret = kdba_bt_stack(0, argcount, p, 1);
+ return ret;
+}
+
+static int __init kdba_bt_x86_init(void)
+{
+ int i, c, cp = -1;
+ struct bb_name_state *r;
+
+ kdb_register_repeat("bb1", kdb_bb1, "<vaddr>", "Analyse one basic block", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("bb_all", kdb_bb_all, "", "Backtrace check on all built in functions", 0, KDB_REPEAT_NONE);
+
+ /* Split the opcode usage table by the first letter of each set of
+ * opcodes, for faster mapping of opcode to its operand usage.
+ */
+ for (i = 0; i < ARRAY_SIZE(bb_opcode_usage_all); ++i) {
+ c = bb_opcode_usage_all[i].opcode[0] - 'a';
+ if (c != cp) {
+ cp = c;
+ bb_opcode_usage[c].opcode = bb_opcode_usage_all + i;
+ }
+ ++bb_opcode_usage[c].size;
+ }
+
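+	/* Illustrative lookup (a sketch; the real consumers are the basic
+	 * block routines earlier in this file): to find the usage entry
+	 * for an opcode such as "mov", only the 'm' bucket need be
+	 * scanned, roughly
+	 *
+	 *	c = "mov"[0] - 'a';
+	 *	for (i = 0; i < bb_opcode_usage[c].size; ++i)
+	 *		if (strcmp(bb_opcode_usage[c].opcode[i].opcode, "mov") == 0)
+	 *			... found ...
+	 */
+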
+ bb_common_interrupt = kallsyms_lookup_name("common_interrupt");
+ bb_error_entry = kallsyms_lookup_name("error_entry");
+ bb_ret_from_intr = kallsyms_lookup_name("ret_from_intr");
+ bb_thread_return = kallsyms_lookup_name("thread_return");
+ bb_sync_regs = kallsyms_lookup_name("sync_regs");
+ bb_save_v86_state = kallsyms_lookup_name("save_v86_state");
+ bb__sched_text_start = kallsyms_lookup_name("__sched_text_start");
+ bb__sched_text_end = kallsyms_lookup_name("__sched_text_end");
+ bb_save_args = kallsyms_lookup_name("save_args");
+ bb_save_rest = kallsyms_lookup_name("save_rest");
+ bb_save_paranoid = kallsyms_lookup_name("save_paranoid");
+ for (i = 0, r = bb_special_cases;
+ i < ARRAY_SIZE(bb_special_cases);
+ ++i, ++r) {
+ r->address = kallsyms_lookup_name(r->name);
+ }
+
+#ifdef CONFIG_4KSTACKS
+ kdba_hardirq_ctx = (struct thread_info **)kallsyms_lookup_name("hardirq_ctx");
+ kdba_softirq_ctx = (struct thread_info **)kallsyms_lookup_name("softirq_ctx");
+#endif /* CONFIG_4KSTACKS */
+
+ return 0;
+}
+
+static void __exit kdba_bt_x86_exit(void)
+{
+ kdb_unregister("bb1");
+ kdb_unregister("bb_all");
+}
+
+module_init(kdba_bt_x86_init)
+module_exit(kdba_bt_x86_exit)
--- /dev/null
+++ b/arch/x86/kdb/kdba_id.c
@@ -0,0 +1,261 @@
+/*
+ * Kernel Debugger Architecture Dependent Instruction Disassembly
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <stdarg.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/kdb.h>
+#include <linux/kdbprivate.h>
+
+/*
+ * kdba_dis_getsym
+ *
+ * Get a symbol for the disassembler.
+ *
+ * Parameters:
+ * addr Address for which to get symbol
+ * dip Pointer to disassemble_info
+ * Returns:
+ * 0
+ * Locking:
+ * Remarks:
+ * Not used for kdb.
+ */
+
+/* ARGSUSED */
+static int
+kdba_dis_getsym(bfd_vma addr, disassemble_info *dip)
+{
+
+ return 0;
+}
+
+/*
+ * kdba_printaddress
+ *
+ * Print (symbolically) an address.
+ *
+ * Parameters:
+ * addr Address for which to get symbol
+ * dip Pointer to disassemble_info
+ * flag True if a ":<tab>" sequence should follow the address
+ * Returns:
+ * 0
+ * Locking:
+ * Remarks:
+ *
+ */
+
+/* ARGSUSED */
+static void
+kdba_printaddress(kdb_machreg_t addr, disassemble_info *dip, int flag)
+{
+ kdb_symtab_t symtab;
+ int spaces = 5;
+ unsigned int offset;
+
+ /*
+ * Print a symbol name or address as necessary.
+ */
+ kdbnearsym(addr, &symtab);
+ if (symtab.sym_name) {
+ /* Do not use kdb_symbol_print here, it always does
+ * kdb_printf but we want dip->fprintf_func.
+ */
+ dip->fprintf_func(dip->stream,
+ "0x%0*lx %s",
+ (int)(2*sizeof(addr)), addr, symtab.sym_name);
+ if ((offset = addr - symtab.sym_start) == 0) {
+ spaces += 4;
+ }
+ else {
+ unsigned int o = offset;
+ while (o >>= 4)
+ --spaces;
+ dip->fprintf_func(dip->stream, "+0x%x", offset);
+ }
+
+ } else {
+ dip->fprintf_func(dip->stream, "0x%lx", addr);
+ }
+
+ if (flag) {
+ if (spaces < 1) {
+ spaces = 1;
+ }
+ dip->fprintf_func(dip->stream, ":%*s", spaces, " ");
+ }
+}
+
+/*
+ * kdba_dis_printaddr
+ *
+ * Print (symbolically) an address. Called by GNU disassembly
+ * code via disassemble_info structure.
+ *
+ * Parameters:
+ * addr Address for which to get symbol
+ * dip Pointer to disassemble_info
+ * Returns:
+ * 0
+ * Locking:
+ * Remarks:
+ * This function will never append ":<tab>" to the printed
+ * symbolic address.
+ */
+
+static void
+kdba_dis_printaddr(bfd_vma addr, disassemble_info *dip)
+{
+ kdba_printaddress(addr, dip, 0);
+}
+
+/*
+ * kdba_dis_getmem
+ *
+ * Fetch 'length' bytes from 'addr' into 'buf'.
+ *
+ * Parameters:
+ * addr Address for which to get symbol
+ * buf Address of buffer to fill with bytes from 'addr'
+ * length Number of bytes to fetch
+ * dip Pointer to disassemble_info
+ * Returns:
+ * 0 if data is available, otherwise error.
+ * Locking:
+ * Remarks:
+ *
+ */
+
+/* ARGSUSED */
+static int
+kdba_dis_getmem(bfd_vma addr, bfd_byte *buf, unsigned int length, disassemble_info *dip)
+{
+ return kdb_getarea_size(buf, addr, length);
+}
+
+/*
+ * kdba_id_parsemode
+ *
+ * Parse IDMODE environment variable string and
+ * set appropriate value into "disassemble_info" structure.
+ *
+ * Parameters:
+ * mode Mode string
+ * dip Disassemble_info structure pointer
+ * Returns:
+ * Locking:
+ * Remarks:
+ *	We handle the values 'x86_64', 'x86' and '8086' to select the
+ *	64-bit, 32-bit or 16-bit legacy instruction set respectively.
+ */
+
+int
+kdba_id_parsemode(const char *mode, disassemble_info *dip)
+{
+ if (mode) {
+ if (strcmp(mode, "x86_64") == 0) {
+ dip->mach = bfd_mach_x86_64;
+ } else if (strcmp(mode, "x86") == 0) {
+ dip->mach = bfd_mach_i386_i386;
+ } else if (strcmp(mode, "8086") == 0) {
+ dip->mach = bfd_mach_i386_i8086;
+ } else {
+ return KDB_BADMODE;
+ }
+ }
+
+ return 0;
+}
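+
+/* Illustrative only: the mode string normally arrives via the IDMODE
+ * environment variable, e.g. from the kdb prompt
+ *
+ *	kdb> set IDMODE 8086
+ *
+ * after which kdba_id_printinsn() disassembles with the 16-bit legacy
+ * instruction set.
+ */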
+
+/*
+ * kdba_check_pc
+ *
+ * Check that the pc is satisfactory.
+ *
+ * Parameters:
+ * pc Program Counter Value.
+ * Returns:
+ * None
+ * Locking:
+ * None.
+ * Remarks:
+ * Can change pc.
+ */
+
+void
+kdba_check_pc(kdb_machreg_t *pc)
+{
+ /* No action */
+}
+
+/*
+ * kdba_id_printinsn
+ *
+ * Format and print a single instruction at 'pc'. Return the
+ * length of the instruction.
+ *
+ * Parameters:
+ * pc Program Counter Value.
+ * dip Disassemble_info structure pointer
+ * Returns:
+ * Length of instruction, -1 for error.
+ * Locking:
+ * None.
+ * Remarks:
+ * Depends on 'IDMODE' environment variable.
+ */
+
+int
+kdba_id_printinsn(kdb_machreg_t pc, disassemble_info *dip)
+{
+ kdba_printaddress(pc, dip, 1);
+ return print_insn_i386_att(pc, dip);
+}
+
+/*
+ * kdba_id_init
+ *
+ * Initialize the architecture dependent elements of
+ * the disassembly information structure
+ * for the GNU disassembler.
+ *
+ * Parameters:
+ * None.
+ * Outputs:
+ * None.
+ * Returns:
+ * None.
+ * Locking:
+ * None.
+ * Remarks:
+ */
+
+void
+kdba_id_init(disassemble_info *dip)
+{
+ dip->read_memory_func = kdba_dis_getmem;
+ dip->print_address_func = kdba_dis_printaddr;
+ dip->symbol_at_address_func = kdba_dis_getsym;
+
+ dip->flavour = bfd_target_elf_flavour;
+ dip->arch = bfd_arch_i386;
+#ifdef CONFIG_X86_64
+ dip->mach = bfd_mach_x86_64;
+#endif
+#ifdef CONFIG_X86_32
+ dip->mach = bfd_mach_i386_i386;
+#endif
+ dip->endian = BFD_ENDIAN_LITTLE;
+
+ dip->display_endian = BFD_ENDIAN_LITTLE;
+}
--- /dev/null
+++ b/arch/x86/kdb/kdba_io.c
@@ -0,0 +1,666 @@
+/*
+ * Kernel Debugger Architecture Dependent Console I/O handler
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+#include <linux/ctype.h>
+#include <linux/keyboard.h>
+#include <linux/serial.h>
+#include <linux/serial_reg.h>
+
+#include <linux/kdb.h>
+#include <linux/kdbprivate.h>
+#include "pc_keyb.h"
+
+#ifdef CONFIG_VT_CONSOLE
+#define KDB_BLINK_LED 1
+#else
+#undef KDB_BLINK_LED
+#endif
+
+#ifdef CONFIG_KDB_USB
+
+struct kdb_usb_kbd_info kdb_usb_kbds[KDB_USB_NUM_KEYBOARDS];
+EXPORT_SYMBOL(kdb_usb_kbds);
+
+extern int kdb_no_usb;
+
+static unsigned char kdb_usb_keycode[256] = {
+ 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38,
+ 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3,
+ 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26,
+ 27, 43, 84, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 87, 88, 99, 70,119,110,102,104,111,107,109,106,
+ 105,108,103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71,
+ 72, 73, 82, 83, 86,127,116,117, 85, 89, 90, 91, 92, 93, 94, 95,
+ 120,121,122,123,134,138,130,132,128,129,131,137,133,135,136,113,
+ 115,114, 0, 0, 0,124, 0,181,182,183,184,185,186,187,188,189,
+ 190,191,192,193,194,195,196,197,198, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 29, 42, 56,125, 97, 54,100,126,164,166,165,163,161,115,114,113,
+ 150,158,159,128,136,177,178,176,142,152,173,140
+};
+
+/*
+ * kdb_usb_keyboard_attach()
+ * Attach a USB keyboard to kdb.
+ */
+int
+kdb_usb_keyboard_attach(struct urb *urb, unsigned char *buffer,
+ void *poll_func, void *compl_func,
+ kdb_hc_keyboard_attach_t kdb_hc_keyboard_attach,
+ kdb_hc_keyboard_detach_t kdb_hc_keyboard_detach,
+ unsigned int bufsize,
+ struct urb *hid_urb)
+{
+ int i;
+ int rc = -1;
+
+ if (kdb_no_usb)
+ return 0;
+
+ /*
+ * Search through the array of KDB USB keyboards (kdb_usb_kbds)
+ * looking for a free index. If found, assign the keyboard to
+ * the array index.
+ */
+
+ for (i = 0; i < KDB_USB_NUM_KEYBOARDS; i++) {
+ if (kdb_usb_kbds[i].urb) /* index is already assigned */
+ continue;
+
+ /* found a free array index */
+ kdb_usb_kbds[i].urb = urb;
+ kdb_usb_kbds[i].buffer = buffer;
+ kdb_usb_kbds[i].poll_func = poll_func;
+
+ kdb_usb_kbds[i].kdb_hc_urb_complete = compl_func;
+ kdb_usb_kbds[i].kdb_hc_keyboard_attach = kdb_hc_keyboard_attach;
+ kdb_usb_kbds[i].kdb_hc_keyboard_detach = kdb_hc_keyboard_detach;
+
+		/* USB Host Controller specific Keyboard attach callback.
+ * Currently only UHCI has this callback.
+ */
+ if (kdb_usb_kbds[i].kdb_hc_keyboard_attach)
+ kdb_usb_kbds[i].kdb_hc_keyboard_attach(i, bufsize);
+
+ rc = 0; /* success */
+
+ break;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kdb_usb_keyboard_attach);
+
+/*
+ * kdb_usb_keyboard_detach()
+ * Detach a USB keyboard from kdb.
+ */
+int
+kdb_usb_keyboard_detach(struct urb *urb)
+{
+ int i;
+ int rc = -1;
+
+ if (kdb_no_usb)
+ return 0;
+
+ /*
+ * Search through the array of KDB USB keyboards (kdb_usb_kbds)
+ * looking for the index with the matching URB. If found,
+ * clear the array index.
+ */
+
+ for (i = 0; i < KDB_USB_NUM_KEYBOARDS; i++) {
+ if ((kdb_usb_kbds[i].urb != urb) &&
+ (kdb_usb_kbds[i].hid_urb != urb))
+ continue;
+
+ /* found it, clear the index */
+
+ /* USB Host Controller specific Keyboard detach callback.
+ * Currently only UHCI has this callback.
+ */
+ if (kdb_usb_kbds[i].kdb_hc_keyboard_detach)
+ kdb_usb_kbds[i].kdb_hc_keyboard_detach(urb, i);
+
+ kdb_usb_kbds[i].urb = NULL;
+ kdb_usb_kbds[i].buffer = NULL;
+ kdb_usb_kbds[i].poll_func = NULL;
+ kdb_usb_kbds[i].caps_lock = 0;
+ kdb_usb_kbds[i].hid_urb = NULL;
+
+ rc = 0; /* success */
+
+ break;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kdb_usb_keyboard_detach);
+
+/*
+ * get_usb_char
+ * This function drives the USB attached keyboards.
+ * Fetch the USB scancode and decode it.
+ */
+static int
+get_usb_char(void)
+{
+ int i;
+ unsigned char keycode, spec;
+ extern u_short plain_map[], shift_map[], ctrl_map[];
+ int ret = 1;
+ int ret_key = -1, j, max;
+
+ if (kdb_no_usb)
+ return -1;
+
+ /*
+ * Loop through all the USB keyboard(s) and return
+ * the first character obtained from them.
+ */
+
+ for (i = 0; i < KDB_USB_NUM_KEYBOARDS; i++) {
+ /* skip uninitialized keyboard array entries */
+ if (!kdb_usb_kbds[i].urb || !kdb_usb_kbds[i].buffer ||
+ !kdb_usb_kbds[i].poll_func)
+ continue;
+
+ /* Transfer char */
+ ret = (*kdb_usb_kbds[i].poll_func)(kdb_usb_kbds[i].urb);
+ if (ret == -EBUSY && kdb_usb_kbds[i].poll_ret != -EBUSY)
+ kdb_printf("NOTICE: USB HD driver BUSY. "
+ "USB keyboard has been disabled.\n");
+
+ kdb_usb_kbds[i].poll_ret = ret;
+
+ if (ret < 0) /* error or no characters, try the next kbd */
+ continue;
+
+		/* If two keys were pressed simultaneously, both keycodes
+		 * will be in the buffer; the last key pressed is the last
+		 * non-zero byte.
+		 */
+ for (j=0; j<4; j++){
+ if (!kdb_usb_kbds[i].buffer[2+j])
+ break;
+ }
+ /* Last pressed key */
+ max = j + 1;
+
+ spec = kdb_usb_kbds[i].buffer[0];
+ keycode = kdb_usb_kbds[i].buffer[2];
+ kdb_usb_kbds[i].buffer[0] = (char)0;
+ kdb_usb_kbds[i].buffer[2] = (char)0;
+
+ ret_key = -1;
+
+ /* A normal key is pressed, decode it */
+ if(keycode)
+ keycode = kdb_usb_keycode[keycode];
+
+		/* Two keys pressed at once? */
+ if (spec && keycode) {
+ switch(spec)
+ {
+ case 0x2:
+ case 0x20: /* Shift */
+ ret_key = shift_map[keycode];
+ break;
+ case 0x1:
+ case 0x10: /* Ctrl */
+ ret_key = ctrl_map[keycode];
+ break;
+ case 0x4:
+ case 0x40: /* Alt */
+ break;
+ }
+ } else if (keycode) { /* If only one key pressed */
+ switch(keycode)
+ {
+ case 0x1C: /* Enter */
+ ret_key = 13;
+ break;
+
+ case 0x3A: /* Capslock */
+ kdb_usb_kbds[i].caps_lock = !(kdb_usb_kbds[i].caps_lock);
+ break;
+ case 0x0E: /* Backspace */
+ ret_key = 8;
+ break;
+ case 0x0F: /* TAB */
+ ret_key = 9;
+ break;
+ case 0x77: /* Pause */
+ break ;
+ default:
+ if(!kdb_usb_kbds[i].caps_lock) {
+ ret_key = plain_map[keycode];
+ }
+ else {
+ ret_key = shift_map[keycode];
+ }
+ }
+ }
+
+ if (ret_key != 1) {
+ /* Key was pressed, return keycode */
+
+ /* Clear buffer before urb resending */
+ if (kdb_usb_kbds[i].buffer)
+ for(j=0; j<8; j++)
+ kdb_usb_kbds[i].buffer[j] = (char)0;
+
+ /* USB Host Controller specific Urb complete callback.
+ * Currently only UHCI has this callback.
+ */
+ if (kdb_usb_kbds[i].kdb_hc_urb_complete)
+ (*kdb_usb_kbds[i].kdb_hc_urb_complete)((struct urb *)kdb_usb_kbds[i].urb);
+
+ return ret_key;
+ }
+ }
+
+
+
+ /* no chars were returned from any of the USB keyboards */
+
+ return -1;
+}
+#endif /* CONFIG_KDB_USB */
+
+/*
+ * This module contains code to read characters from the keyboard or a serial
+ * port.
+ *
+ * It is used by the kernel debugger, and is polled, not interrupt driven.
+ *
+ */
+
+#ifdef KDB_BLINK_LED
+/*
+ * send: Send a byte to the keyboard controller. Used primarily to
+ * alter LED settings.
+ */
+
+static void
+kdb_kbdsend(unsigned char byte)
+{
+ int timeout;
+ for (timeout = 200 * 1000; timeout && (inb(KBD_STATUS_REG) & KBD_STAT_IBF); timeout--);
+ outb(byte, KBD_DATA_REG);
+ udelay(40);
+ for (timeout = 200 * 1000; timeout && (~inb(KBD_STATUS_REG) & KBD_STAT_OBF); timeout--);
+ inb(KBD_DATA_REG);
+ udelay(40);
+}
+
+static void
+kdb_toggleled(int led)
+{
+ static int leds;
+
+ leds ^= led;
+
+ kdb_kbdsend(KBD_CMD_SET_LEDS);
+ kdb_kbdsend((unsigned char)leds);
+}
+#endif /* KDB_BLINK_LED */
+
+#if defined(CONFIG_SERIAL_8250_CONSOLE) || defined(CONFIG_SERIAL_CORE_CONSOLE)
+#define CONFIG_SERIAL_CONSOLE
+#endif
+
+#if defined(CONFIG_SERIAL_CONSOLE)
+
+struct kdb_serial kdb_serial;
+
+static unsigned int
+serial_inp(struct kdb_serial *kdb_serial, unsigned long offset)
+{
+ offset <<= kdb_serial->ioreg_shift;
+
+ switch (kdb_serial->io_type) {
+ case SERIAL_IO_MEM:
+ return readb((void __iomem *)(kdb_serial->iobase + offset));
+ break;
+ default:
+ return inb(kdb_serial->iobase + offset);
+ break;
+ }
+}
+
+/* Check if there is a byte ready at the serial port */
+static int get_serial_char(void)
+{
+ unsigned char ch;
+
+ if (kdb_serial.iobase == 0)
+ return -1;
+
+ if (serial_inp(&kdb_serial, UART_LSR) & UART_LSR_DR) {
+ ch = serial_inp(&kdb_serial, UART_RX);
+ if (ch == 0x7f)
+ ch = 8;
+ return ch;
+ }
+ return -1;
+}
+#endif /* CONFIG_SERIAL_CONSOLE */
+
+#ifdef CONFIG_VT_CONSOLE
+
+static int kbd_exists;
+
+/*
+ * Check if the keyboard controller has a keypress for us.
+ * Some parts (Enter Release, LED change) are still handled by blocking
+ * polls here, but hopefully they are all short.
+ */
+static int get_kbd_char(void)
+{
+ int scancode, scanstatus;
+ static int shift_lock; /* CAPS LOCK state (0-off, 1-on) */
+ static int shift_key; /* Shift next keypress */
+ static int ctrl_key;
+ u_short keychar;
+ extern u_short plain_map[], shift_map[], ctrl_map[];
+
+ if (KDB_FLAG(NO_I8042) || KDB_FLAG(NO_VT_CONSOLE) ||
+ (inb(KBD_STATUS_REG) == 0xff && inb(KBD_DATA_REG) == 0xff)) {
+ kbd_exists = 0;
+ return -1;
+ }
+ kbd_exists = 1;
+
+ if ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0)
+ return -1;
+
+ /*
+ * Fetch the scancode
+ */
+ scancode = inb(KBD_DATA_REG);
+ scanstatus = inb(KBD_STATUS_REG);
+
+ /*
+ * Ignore mouse events.
+ */
+ if (scanstatus & KBD_STAT_MOUSE_OBF)
+ return -1;
+
+ /*
+ * Ignore release, trigger on make
+ * (except for shift keys, where we want to
+ * keep the shift state so long as the key is
+ * held down).
+ */
+
+ if (((scancode&0x7f) == 0x2a) || ((scancode&0x7f) == 0x36)) {
+ /*
+ * Next key may use shift table
+ */
+ if ((scancode & 0x80) == 0) {
+ shift_key=1;
+ } else {
+ shift_key=0;
+ }
+ return -1;
+ }
+
+ if ((scancode&0x7f) == 0x1d) {
+ /*
+ * Left ctrl key
+ */
+ if ((scancode & 0x80) == 0) {
+ ctrl_key = 1;
+ } else {
+ ctrl_key = 0;
+ }
+ return -1;
+ }
+
+ if ((scancode & 0x80) != 0)
+ return -1;
+
+ scancode &= 0x7f;
+
+ /*
+ * Translate scancode
+ */
+
+ if (scancode == 0x3a) {
+ /*
+ * Toggle caps lock
+ */
+ shift_lock ^= 1;
+
+#ifdef KDB_BLINK_LED
+ kdb_toggleled(0x4);
+#endif
+ return -1;
+ }
+
+ if (scancode == 0x0e) {
+ /*
+ * Backspace
+ */
+ return 8;
+ }
+
+ /* Special Key */
+ switch (scancode) {
+ case 0xF: /* Tab */
+ return 9;
+ case 0x53: /* Del */
+ return 4;
+ case 0x47: /* Home */
+ return 1;
+ case 0x4F: /* End */
+ return 5;
+ case 0x4B: /* Left */
+ return 2;
+ case 0x48: /* Up */
+ return 16;
+ case 0x50: /* Down */
+ return 14;
+ case 0x4D: /* Right */
+ return 6;
+ }
+
+ if (scancode == 0xe0) {
+ return -1;
+ }
+
+ /*
+ * For Japanese 86/106 keyboards
+ * See comment in drivers/char/pc_keyb.c.
+ * - Masahiro Adegawa
+ */
+ if (scancode == 0x73) {
+ scancode = 0x59;
+ } else if (scancode == 0x7d) {
+ scancode = 0x7c;
+ }
+
+ if (!shift_lock && !shift_key && !ctrl_key) {
+ keychar = plain_map[scancode];
+ } else if (shift_lock || shift_key) {
+ keychar = shift_map[scancode];
+ } else if (ctrl_key) {
+ keychar = ctrl_map[scancode];
+ } else {
+ keychar = 0x0020;
+ kdb_printf("Unknown state/scancode (%d)\n", scancode);
+ }
+ keychar &= 0x0fff;
+ if (keychar == '\t')
+ keychar = ' ';
+ switch (KTYP(keychar)) {
+ case KT_LETTER:
+ case KT_LATIN:
+ if (isprint(keychar))
+ break; /* printable characters */
+ /* drop through */
+ case KT_SPEC:
+ if (keychar == K_ENTER)
+ break;
+ /* drop through */
+ default:
+ return(-1); /* ignore unprintables */
+ }
+
+ if ((scancode & 0x7f) == 0x1c) {
+ /*
+ * enter key. All done. Absorb the release scancode.
+ */
+ while ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0)
+ ;
+
+ /*
+ * Fetch the scancode
+ */
+ scancode = inb(KBD_DATA_REG);
+ scanstatus = inb(KBD_STATUS_REG);
+
+ while (scanstatus & KBD_STAT_MOUSE_OBF) {
+ scancode = inb(KBD_DATA_REG);
+ scanstatus = inb(KBD_STATUS_REG);
+ }
+
+ if (scancode != 0x9c) {
+ /*
+ * Wasn't an enter-release, why not?
+ */
+ kdb_printf("kdb: expected enter got 0x%x status 0x%x\n",
+ scancode, scanstatus);
+ }
+
+ kdb_printf("\n");
+ return 13;
+ }
+
+ return keychar & 0xff;
+}
+#endif /* CONFIG_VT_CONSOLE */
+
+#ifdef KDB_BLINK_LED
+
+/* Leave numlock alone, setting it messes up laptop keyboards with the keypad
+ * mapped over normal keys.
+ */
+static int kdba_blink_mask = 0x1 | 0x4;
+
+#define BOGOMIPS (boot_cpu_data.loops_per_jiffy/(500000/HZ))
+static int blink_led(void)
+{
+ static long delay;
+
+ if (kbd_exists == 0)
+ return -1;
+
+ if (--delay < 0) {
+ if (BOGOMIPS == 0) /* early kdb */
+ delay = 150000000/1000; /* arbitrary bogomips */
+ else
+ delay = 150000000/BOGOMIPS; /* Roughly 1 second when polling */
+ kdb_toggleled(kdba_blink_mask);
+ }
+ return -1;
+}
+#endif
+
+get_char_func poll_funcs[] = {
+#if defined(CONFIG_VT_CONSOLE)
+ get_kbd_char,
+#endif
+#if defined(CONFIG_SERIAL_CONSOLE)
+ get_serial_char,
+#endif
+#ifdef KDB_BLINK_LED
+ blink_led,
+#endif
+#ifdef CONFIG_KDB_USB
+ get_usb_char,
+#endif
+ NULL
+};
+
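+/* A sketch of how this table is consumed (the polling loop itself lives in
+ * the generic kdb input code, not in this file): each entry is called in
+ * turn until one returns a character instead of -1,
+ *
+ *	for (f = poll_funcs; *f; ++f) {
+ *		key = (*f)();
+ *		if (key != -1)
+ *			... got a character ...
+ *	}
+ *
+ * blink_led() always returns -1, so it only piggybacks on the polling.
+ */
+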
+/*
+ * On some Compaq Deskpros, the keyboard freezes many times after exiting
+ * from kdb. As kdb's keyboard handler is not interrupt-driven and uses a
+ * polled interface, it makes more sense to disable the motherboard keyboard
+ * controller's OBF interrupts during kdb's polling. If interrupts remain
+ * enabled while kdb polls, unnecessary interrupts may be signalled during
+ * keypresses; these are also sometimes seen as spurious interrupts after
+ * exiting from kdb. This hack of disabling OBF interrupts on entry to kdb
+ * and re-enabling them at kdb exit also solves the keyboard freeze issue.
+ * These functions are called from kdb_local(), hence they are arch-specific
+ * setup and cleanup functions executing only on the local processor
+ * - ashishk@sco.com
+ */
+
+void kdba_local_arch_setup(void)
+{
+#ifdef CONFIG_VT_CONSOLE
+ int timeout;
+ unsigned char c;
+
+ while (kbd_read_status() & KBD_STAT_IBF);
+ kbd_write_command(KBD_CCMD_READ_MODE);
+ mdelay(1);
+ while (kbd_read_status() & KBD_STAT_IBF);
+ for (timeout = 200 * 1000; timeout &&
+ (!(kbd_read_status() & KBD_STAT_OBF)); timeout--);
+ c = kbd_read_input();
+ c &= ~KBD_MODE_KBD_INT;
+ while (kbd_read_status() & KBD_STAT_IBF);
+ kbd_write_command(KBD_CCMD_WRITE_MODE);
+ mdelay(1);
+ while (kbd_read_status() & KBD_STAT_IBF);
+ kbd_write_output(c);
+ mdelay(1);
+ while (kbd_read_status() & KBD_STAT_IBF);
+ mdelay(1);
+#endif /* CONFIG_VT_CONSOLE */
+}
+
+void kdba_local_arch_cleanup(void)
+{
+#ifdef CONFIG_VT_CONSOLE
+ int timeout;
+ unsigned char c;
+
+ while (kbd_read_status() & KBD_STAT_IBF);
+ kbd_write_command(KBD_CCMD_READ_MODE);
+ mdelay(1);
+ while (kbd_read_status() & KBD_STAT_IBF);
+ for (timeout = 200 * 1000; timeout &&
+ (!(kbd_read_status() & KBD_STAT_OBF)); timeout--);
+ c = kbd_read_input();
+ c |= KBD_MODE_KBD_INT;
+ while (kbd_read_status() & KBD_STAT_IBF);
+ kbd_write_command(KBD_CCMD_WRITE_MODE);
+ mdelay(1);
+ while (kbd_read_status() & KBD_STAT_IBF);
+ kbd_write_output(c);
+ mdelay(1);
+ while (kbd_read_status() & KBD_STAT_IBF);
+ mdelay(1);
+#endif /* CONFIG_VT_CONSOLE */
+}
--- /dev/null
+++ b/arch/x86/kdb/kdba_support.c
@@ -0,0 +1,1536 @@
+/*
+ * Kernel Debugger Architecture Independent Support Functions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <linux/string.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/ptrace.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/hardirq.h>
+#include <linux/kdb.h>
+#include <linux/kdbprivate.h>
+#include <linux/interrupt.h>
+#include <linux/kdebug.h>
+#include <linux/cpumask.h>
+
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include <asm/uaccess.h>
+#include <asm/desc.h>
+
+static kdb_machreg_t
+kdba_getcr(int regnum)
+{
+ kdb_machreg_t contents = 0;
+ switch(regnum) {
+ case 0:
+ __asm__ (_ASM_MOV " %%cr0,%0\n\t":"=r"(contents));
+ break;
+ case 1:
+ break;
+ case 2:
+ __asm__ (_ASM_MOV " %%cr2,%0\n\t":"=r"(contents));
+ break;
+ case 3:
+ __asm__ (_ASM_MOV " %%cr3,%0\n\t":"=r"(contents));
+ break;
+ case 4:
+ __asm__ (_ASM_MOV " %%cr4,%0\n\t":"=r"(contents));
+ break;
+ default:
+ break;
+ }
+
+ return contents;
+}
+
+void
+kdba_putdr(int regnum, kdb_machreg_t contents)
+{
+ switch(regnum) {
+ case 0:
+ __asm__ (_ASM_MOV " %0,%%db0\n\t"::"r"(contents));
+ break;
+ case 1:
+ __asm__ (_ASM_MOV " %0,%%db1\n\t"::"r"(contents));
+ break;
+ case 2:
+ __asm__ (_ASM_MOV " %0,%%db2\n\t"::"r"(contents));
+ break;
+ case 3:
+ __asm__ (_ASM_MOV " %0,%%db3\n\t"::"r"(contents));
+ break;
+ case 4:
+ case 5:
+ break;
+ case 6:
+ __asm__ (_ASM_MOV " %0,%%db6\n\t"::"r"(contents));
+ break;
+ case 7:
+ __asm__ (_ASM_MOV " %0,%%db7\n\t"::"r"(contents));
+ break;
+ default:
+ break;
+ }
+}
+
+kdb_machreg_t
+kdba_getdr(int regnum)
+{
+ kdb_machreg_t contents = 0;
+ switch(regnum) {
+ case 0:
+ __asm__ (_ASM_MOV " %%db0,%0\n\t":"=r"(contents));
+ break;
+ case 1:
+ __asm__ (_ASM_MOV " %%db1,%0\n\t":"=r"(contents));
+ break;
+ case 2:
+ __asm__ (_ASM_MOV " %%db2,%0\n\t":"=r"(contents));
+ break;
+ case 3:
+ __asm__ (_ASM_MOV " %%db3,%0\n\t":"=r"(contents));
+ break;
+ case 4:
+ case 5:
+ break;
+ case 6:
+ __asm__ (_ASM_MOV " %%db6,%0\n\t":"=r"(contents));
+ break;
+ case 7:
+ __asm__ (_ASM_MOV " %%db7,%0\n\t":"=r"(contents));
+ break;
+ default:
+ break;
+ }
+
+ return contents;
+}
+
+kdb_machreg_t
+kdba_getdr6(void)
+{
+ return kdba_getdr(6);
+}
+
+kdb_machreg_t
+kdba_getdr7(void)
+{
+ return kdba_getdr(7);
+}
+
+void
+kdba_putdr6(kdb_machreg_t contents)
+{
+ kdba_putdr(6, contents);
+}
+
+static void
+kdba_putdr7(kdb_machreg_t contents)
+{
+ kdba_putdr(7, contents);
+}
+
+void
+kdba_installdbreg(kdb_bp_t *bp)
+{
+ int cpu = smp_processor_id();
+
+ kdb_machreg_t dr7;
+
+ dr7 = kdba_getdr7();
+
+ kdba_putdr(bp->bp_hard[cpu]->bph_reg, bp->bp_addr);
+
+ dr7 |= DR7_GE;
+ if (cpu_has_de)
+ set_in_cr4(X86_CR4_DE);
+
+ switch (bp->bp_hard[cpu]->bph_reg){
+ case 0:
+ DR7_RW0SET(dr7,bp->bp_hard[cpu]->bph_mode);
+ DR7_LEN0SET(dr7,bp->bp_hard[cpu]->bph_length);
+ DR7_G0SET(dr7);
+ break;
+ case 1:
+ DR7_RW1SET(dr7,bp->bp_hard[cpu]->bph_mode);
+ DR7_LEN1SET(dr7,bp->bp_hard[cpu]->bph_length);
+ DR7_G1SET(dr7);
+ break;
+ case 2:
+ DR7_RW2SET(dr7,bp->bp_hard[cpu]->bph_mode);
+ DR7_LEN2SET(dr7,bp->bp_hard[cpu]->bph_length);
+ DR7_G2SET(dr7);
+ break;
+ case 3:
+ DR7_RW3SET(dr7,bp->bp_hard[cpu]->bph_mode);
+ DR7_LEN3SET(dr7,bp->bp_hard[cpu]->bph_length);
+ DR7_G3SET(dr7);
+ break;
+ default:
+ kdb_printf("kdb: Bad debug register!! %ld\n",
+ bp->bp_hard[cpu]->bph_reg);
+ break;
+ }
+
+ kdba_putdr7(dr7);
+ return;
+}
+
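+/* DR7 background (for reference): each of the four debug address registers
+ * has a 2-bit R/W field in DR7 (break on execute, write, i/o or data
+ * access) and a 2-bit LEN field (1, 2, 4 or 8 bytes), plus local/global
+ * enable bits. The DR7_RW*SET, DR7_LEN*SET and DR7_G*SET/CLR macros used
+ * here manipulate exactly those fields; i/o breakpoints additionally
+ * require CR4.DE, hence the set_in_cr4(X86_CR4_DE) above.
+ */
+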
+void
+kdba_removedbreg(kdb_bp_t *bp)
+{
+ int regnum;
+ kdb_machreg_t dr7;
+ int cpu = smp_processor_id();
+
+ if (!bp->bp_hard[cpu])
+ return;
+
+ regnum = bp->bp_hard[cpu]->bph_reg;
+
+ dr7 = kdba_getdr7();
+
+ kdba_putdr(regnum, 0);
+
+ switch (regnum) {
+ case 0:
+ DR7_G0CLR(dr7);
+ DR7_L0CLR(dr7);
+ break;
+ case 1:
+ DR7_G1CLR(dr7);
+ DR7_L1CLR(dr7);
+ break;
+ case 2:
+ DR7_G2CLR(dr7);
+ DR7_L2CLR(dr7);
+ break;
+ case 3:
+ DR7_G3CLR(dr7);
+ DR7_L3CLR(dr7);
+ break;
+ default:
+ kdb_printf("kdb: Bad debug register!! %d\n", regnum);
+ break;
+ }
+
+ kdba_putdr7(dr7);
+}
+
+struct kdbregs {
+ char *reg_name;
+ size_t reg_offset;
+};
+
+static struct kdbregs dbreglist[] = {
+ { "dr0", 0 },
+ { "dr1", 1 },
+ { "dr2", 2 },
+ { "dr3", 3 },
+ { "dr6", 6 },
+ { "dr7", 7 },
+};
+
+static const int ndbreglist = sizeof(dbreglist) / sizeof(struct kdbregs);
+
+#ifdef CONFIG_X86_32
+static struct kdbregs kdbreglist[] = {
+ { "ax", offsetof(struct pt_regs, ax) },
+ { "bx", offsetof(struct pt_regs, bx) },
+ { "cx", offsetof(struct pt_regs, cx) },
+ { "dx", offsetof(struct pt_regs, dx) },
+
+ { "si", offsetof(struct pt_regs, si) },
+ { "di", offsetof(struct pt_regs, di) },
+ { "sp", offsetof(struct pt_regs, sp) },
+ { "ip", offsetof(struct pt_regs, ip) },
+
+ { "bp", offsetof(struct pt_regs, bp) },
+ { "ss", offsetof(struct pt_regs, ss) },
+ { "cs", offsetof(struct pt_regs, cs) },
+ { "flags", offsetof(struct pt_regs, flags) },
+
+ { "ds", offsetof(struct pt_regs, ds) },
+ { "es", offsetof(struct pt_regs, es) },
+ { "origax", offsetof(struct pt_regs, orig_ax) },
+
+};
+
+static const int nkdbreglist = sizeof(kdbreglist) / sizeof(struct kdbregs);
+
+
+/*
+ * kdba_getregcontents
+ *
+ * Return the contents of the register specified by the
+ * input string argument. Return an error if the string
+ * does not match a machine register.
+ *
+ * The following pseudo register names are supported:
+ * &regs - Prints address of exception frame
+ * kesp - Prints kernel stack pointer at time of fault
+ * cesp - Prints current kernel stack pointer, inside kdb
+ * ceflags - Prints current flags, inside kdb
+ * %<regname> - Uses the value of the registers at the
+ * last time the user process entered kernel
+ * mode, instead of the registers at the time
+ * kdb was entered.
+ *
+ * Parameters:
+ * regname Pointer to string naming register
+ * regs Pointer to structure containing registers.
+ * Outputs:
+ *	*contents	Pointer to unsigned long to receive register contents
+ * Returns:
+ * 0 Success
+ * KDB_BADREG Invalid register name
+ * Locking:
+ * None.
+ * Remarks:
+ * If kdb was entered via an interrupt from the kernel itself then
+ * ss and sp are *not* on the stack.
+ */
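+/* Illustrative calls (a sketch of how the register commands resolve names):
+ *
+ *	kdba_getregcontents("ax", regs, &val);		ax at entry to kdb
+ *	kdba_getregcontents("%ax", regs, &val);		last user-mode ax
+ *	kdba_getregcontents("cesp", NULL, &val);	current kernel esp
+ *
+ * "cesp" and "ceflags" never dereference regs, so they work even when
+ * pt_regs is unavailable.
+ */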
+
+int
+kdba_getregcontents(const char *regname,
+ struct pt_regs *regs,
+ kdb_machreg_t *contents)
+{
+ int i;
+
+ if (strcmp(regname, "cesp") == 0) {
+ asm volatile("movl %%esp,%0":"=m" (*contents));
+ return 0;
+ }
+
+ if (strcmp(regname, "ceflags") == 0) {
+ unsigned long flags;
+ local_save_flags(flags);
+ *contents = flags;
+ return 0;
+ }
+
+ if (regname[0] == '%') {
+ /* User registers: %%e[a-c]x, etc */
+ regname++;
+ regs = (struct pt_regs *)
+ (kdb_current_task->thread.sp0 - sizeof(struct pt_regs));
+ }
+
+ for (i=0; i<ndbreglist; i++) {
+ if (strnicmp(dbreglist[i].reg_name,
+ regname,
+ strlen(regname)) == 0)
+ break;
+ }
+
+ if ((i < ndbreglist)
+ && (strlen(dbreglist[i].reg_name) == strlen(regname))) {
+ *contents = kdba_getdr(dbreglist[i].reg_offset);
+ return 0;
+ }
+
+ if (!regs) {
+ kdb_printf("%s: pt_regs not available, use bt* or pid to select a different task\n", __FUNCTION__);
+ return KDB_BADREG;
+ }
+
+ if (strcmp(regname, "&regs") == 0) {
+ *contents = (unsigned long)regs;
+ return 0;
+ }
+
+ if (strcmp(regname, "kesp") == 0) {
+ *contents = (unsigned long)regs + sizeof(struct pt_regs);
+ if ((regs->cs & 0xffff) == __KERNEL_CS) {
+ /* sp and ss are not on stack */
+ *contents -= 2*4;
+ }
+ return 0;
+ }
+
+ for (i=0; i<nkdbreglist; i++) {
+ if (strnicmp(kdbreglist[i].reg_name,
+ regname,
+ strlen(regname)) == 0)
+ break;
+ }
+
+ if ((i < nkdbreglist)
+ && (strlen(kdbreglist[i].reg_name) == strlen(regname))) {
+ if ((regs->cs & 0xffff) == __KERNEL_CS) {
+ /* No cpl switch, sp and ss are not on stack */
+ if (strcmp(kdbreglist[i].reg_name, "sp") == 0) {
+ *contents = (kdb_machreg_t)regs +
+ sizeof(struct pt_regs) - 2*4;
+ return(0);
+ }
+ if (strcmp(kdbreglist[i].reg_name, "xss") == 0) {
+ asm volatile(
+ "pushl %%ss\n"
+ "popl %0\n"
+ :"=m" (*contents));
+ return(0);
+ }
+ }
+ *contents = *(unsigned long *)((unsigned long)regs +
+ kdbreglist[i].reg_offset);
+ return(0);
+ }
+
+ return KDB_BADREG;
+}
+
+/*
+ * kdba_setregcontents
+ *
+ * Set the contents of the register specified by the
+ * input string argument. Return an error if the string
+ * does not match a machine register.
+ *
+ * Supports modification of user-mode registers via
+ * %<register-name>
+ *
+ * Parameters:
+ * regname Pointer to string naming register
+ * regs Pointer to structure containing registers.
+ * contents Unsigned long containing new register contents
+ * Outputs:
+ * Returns:
+ * 0 Success
+ * KDB_BADREG Invalid register name
+ * Locking:
+ * None.
+ * Remarks:
+ */
+
+int
+kdba_setregcontents(const char *regname,
+ struct pt_regs *regs,
+ unsigned long contents)
+{
+ int i;
+
+ if (regname[0] == '%') {
+ regname++;
+ regs = (struct pt_regs *)
+ (kdb_current_task->thread.sp0 - sizeof(struct pt_regs));
+ }
+
+ for (i=0; i<ndbreglist; i++) {
+ if (strnicmp(dbreglist[i].reg_name,
+ regname,
+ strlen(regname)) == 0)
+ break;
+ }
+
+ if ((i < ndbreglist)
+ && (strlen(dbreglist[i].reg_name) == strlen(regname))) {
+ kdba_putdr(dbreglist[i].reg_offset, contents);
+ return 0;
+ }
+
+ if (!regs) {
+ kdb_printf("%s: pt_regs not available, use bt* or pid to select a different task\n", __FUNCTION__);
+ return KDB_BADREG;
+ }
+
+ for (i=0; i<nkdbreglist; i++) {
+ if (strnicmp(kdbreglist[i].reg_name,
+ regname,
+ strlen(regname)) == 0)
+ break;
+ }
+
+ if ((i < nkdbreglist)
+ && (strlen(kdbreglist[i].reg_name) == strlen(regname))) {
+ *(unsigned long *)((unsigned long)regs
+ + kdbreglist[i].reg_offset) = contents;
+ return 0;
+ }
+
+ return KDB_BADREG;
+}
+
+/*
+ * kdba_pt_regs
+ *
+ * Format a struct pt_regs
+ *
+ * Inputs:
+ * argc argument count
+ * argv argument vector
+ * Outputs:
+ * None.
+ * Returns:
+ * zero for success, a kdb diagnostic if error
+ * Locking:
+ * none.
+ * Remarks:
+ * If no address is supplied, it uses the last irq pt_regs.
+ */
+
+static int
+kdba_pt_regs(int argc, const char **argv)
+{
+ int diag;
+ kdb_machreg_t addr;
+ long offset = 0;
+ int nextarg;
+ struct pt_regs *p;
+ static const char *fmt = " %-11.11s 0x%lx\n";
+
+ if (argc == 0) {
+ addr = (kdb_machreg_t) get_irq_regs();
+ } else if (argc == 1) {
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
+ if (diag)
+ return diag;
+ } else {
+ return KDB_ARGCOUNT;
+ }
+
+ p = (struct pt_regs *) addr;
+ kdb_printf("struct pt_regs 0x%p-0x%p\n", p, (unsigned char *)p + sizeof(*p) - 1);
+ kdb_print_nameval("bx", p->bx);
+ kdb_print_nameval("cx", p->cx);
+ kdb_print_nameval("dx", p->dx);
+ kdb_print_nameval("si", p->si);
+ kdb_print_nameval("di", p->di);
+ kdb_print_nameval("bp", p->bp);
+ kdb_print_nameval("ax", p->ax);
+ kdb_printf(fmt, "ds", p->ds);
+ kdb_printf(fmt, "es", p->es);
+ kdb_print_nameval("orig_ax", p->orig_ax);
+ kdb_print_nameval("ip", p->ip);
+ kdb_printf(fmt, "cs", p->cs);
+ kdb_printf(fmt, "flags", p->flags);
+ kdb_printf(fmt, "sp", p->sp);
+ kdb_printf(fmt, "ss", p->ss);
+ return 0;
+}
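+
+/* Illustrative only: assuming this handler is registered under the command
+ * name "pt_regs" (the registration is outside this hunk), a session would
+ * look like
+ *
+ *	kdb> pt_regs <address>
+ *
+ * with no address it decodes the last irq pt_regs, per the remarks above.
+ */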
+
+#else /* CONFIG_X86_32 */
+
+static struct kdbregs kdbreglist[] = {
+ { "r15", offsetof(struct pt_regs, r15) },
+ { "r14", offsetof(struct pt_regs, r14) },
+ { "r13", offsetof(struct pt_regs, r13) },
+ { "r12", offsetof(struct pt_regs, r12) },
+ { "bp", offsetof(struct pt_regs, bp) },
+ { "bx", offsetof(struct pt_regs, bx) },
+ { "r11", offsetof(struct pt_regs, r11) },
+ { "r10", offsetof(struct pt_regs, r10) },
+ { "r9", offsetof(struct pt_regs, r9) },
+ { "r8", offsetof(struct pt_regs, r8) },
+ { "ax", offsetof(struct pt_regs, ax) },
+ { "cx", offsetof(struct pt_regs, cx) },
+ { "dx", offsetof(struct pt_regs, dx) },
+ { "si", offsetof(struct pt_regs, si) },
+ { "di", offsetof(struct pt_regs, di) },
+ { "orig_ax", offsetof(struct pt_regs, orig_ax) },
+ { "ip", offsetof(struct pt_regs, ip) },
+ { "cs", offsetof(struct pt_regs, cs) },
+ { "flags", offsetof(struct pt_regs, flags) },
+ { "sp", offsetof(struct pt_regs, sp) },
+ { "ss", offsetof(struct pt_regs, ss) },
+};
+
+static const int nkdbreglist = sizeof(kdbreglist) / sizeof(struct kdbregs);
+
+
+/*
+ * kdba_getregcontents
+ *
+ * Return the contents of the register specified by the
+ * input string argument. Return an error if the string
+ * does not match a machine register.
+ *
+ * The following pseudo register names are supported:
+ * &regs - Prints address of exception frame
+ * krsp - Prints kernel stack pointer at time of fault
+ * crsp - Prints current kernel stack pointer, inside kdb
+ * ceflags - Prints current flags, inside kdb
+ * %<regname> - Uses the value of the registers at the
+ * last time the user process entered kernel
+ * mode, instead of the registers at the time
+ * kdb was entered.
+ *
+ * Parameters:
+ * regname Pointer to string naming register
+ * regs Pointer to structure containing registers.
+ * Outputs:
+ *	*contents	Pointer to unsigned long to receive register contents
+ * Returns:
+ * 0 Success
+ * KDB_BADREG Invalid register name
+ * Locking:
+ * None.
+ * Remarks:
+ * If kdb was entered via an interrupt from the kernel itself then
+ * ss and sp are *not* on the stack.
+ */
+int
+kdba_getregcontents(const char *regname,
+ struct pt_regs *regs,
+ kdb_machreg_t *contents)
+{
+ int i;
+
+ if (strcmp(regname, "&regs") == 0) {
+ *contents = (unsigned long)regs;
+ return 0;
+ }
+
+ if (strcmp(regname, "krsp") == 0) {
+ *contents = (unsigned long)regs + sizeof(struct pt_regs);
+ if ((regs->cs & 0xffff) == __KERNEL_CS) {
+ /* sp and ss are not on stack */
+			*contents -= 2*8;
+ }
+ return 0;
+ }
+
+ if (strcmp(regname, "crsp") == 0) {
+ asm volatile("movq %%rsp,%0":"=m" (*contents));
+ return 0;
+ }
+
+ if (strcmp(regname, "ceflags") == 0) {
+ unsigned long flags;
+ local_save_flags(flags);
+ *contents = flags;
+ return 0;
+ }
+
+ if (regname[0] == '%') {
+ /* User registers: %%r[a-c]x, etc */
+ regname++;
+ regs = (struct pt_regs *)
+ (current->thread.sp0 - sizeof(struct pt_regs));
+ }
+
+ for (i=0; i<nkdbreglist; i++) {
+ if (strnicmp(kdbreglist[i].reg_name,
+ regname,
+ strlen(regname)) == 0)
+ break;
+ }
+
+ if ((i < nkdbreglist)
+ && (strlen(kdbreglist[i].reg_name) == strlen(regname))) {
+ if ((regs->cs & 0xffff) == __KERNEL_CS) {
+ /* No cpl switch, sp is not on stack */
+ if (strcmp(kdbreglist[i].reg_name, "sp") == 0) {
+ *contents = (kdb_machreg_t)regs +
+ sizeof(struct pt_regs) - 2*8;
+ return(0);
+ }
+#if 0 /* FIXME */
+ if (strcmp(kdbreglist[i].reg_name, "ss") == 0) {
+ kdb_machreg_t r;
+
+ r = (kdb_machreg_t)regs +
+ sizeof(struct pt_regs) - 2*8;
+ *contents = (kdb_machreg_t)SS(r); /* XXX */
+ return(0);
+ }
+#endif
+ }
+ *contents = *(unsigned long *)((unsigned long)regs +
+ kdbreglist[i].reg_offset);
+ return(0);
+ }
+
+ for (i=0; i<ndbreglist; i++) {
+ if (strnicmp(dbreglist[i].reg_name,
+ regname,
+ strlen(regname)) == 0)
+ break;
+ }
+
+ if ((i < ndbreglist)
+ && (strlen(dbreglist[i].reg_name) == strlen(regname))) {
+ *contents = kdba_getdr(dbreglist[i].reg_offset);
+ return 0;
+ }
+ return KDB_BADREG;
+}
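+
+/*
+ * Illustrative sketch, not part of this patch: reading a register via
+ * kdba_getregcontents().  kdb_current_regs is assumed here to hold the
+ * frame of the task selected with the "pid" command.
+ *
+ *	kdb_machreg_t sp;
+ *
+ *	if (kdba_getregcontents("sp", kdb_current_regs, &sp) == 0)
+ *		kdb_printf("sp = 0x%lx\n", sp);
+ *	else
+ *		kdb_printf("no such register\n");
+ */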
+
+/*
+ * kdba_setregcontents
+ *
+ * Set the contents of the register specified by the
+ * input string argument. Return an error if the string
+ * does not match a machine register.
+ *
+ * Supports modification of user-mode registers via
+ * %<register-name>
+ *
+ * Parameters:
+ * regname Pointer to string naming register
+ * regs Pointer to structure containing registers.
+ * contents Unsigned long containing new register contents
+ * Outputs:
+ * Returns:
+ * 0 Success
+ * KDB_BADREG Invalid register name
+ * Locking:
+ * None.
+ * Remarks:
+ */
+
+int
+kdba_setregcontents(const char *regname,
+ struct pt_regs *regs,
+ unsigned long contents)
+{
+ int i;
+
+ if (regname[0] == '%') {
+ regname++;
+ regs = (struct pt_regs *)
+ (current->thread.sp0 - sizeof(struct pt_regs));
+ }
+
+ for (i=0; i<nkdbreglist; i++) {
+ if (strnicmp(kdbreglist[i].reg_name,
+ regname,
+ strlen(regname)) == 0)
+ break;
+ }
+
+ if ((i < nkdbreglist)
+ && (strlen(kdbreglist[i].reg_name) == strlen(regname))) {
+ *(unsigned long *)((unsigned long)regs
+ + kdbreglist[i].reg_offset) = contents;
+ return 0;
+ }
+
+ for (i=0; i<ndbreglist; i++) {
+ if (strnicmp(dbreglist[i].reg_name,
+ regname,
+ strlen(regname)) == 0)
+ break;
+ }
+
+ if ((i < ndbreglist)
+ && (strlen(dbreglist[i].reg_name) == strlen(regname))) {
+ kdba_putdr(dbreglist[i].reg_offset, contents);
+ return 0;
+ }
+
+ return KDB_BADREG;
+}
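+
+/*
+ * Illustrative counterpart, an assumption rather than code from this
+ * patch: kdb's "rm" (register modify) command is the typical caller,
+ * roughly as in
+ *
+ *	if (kdba_setregcontents("ip", regs, new_ip))
+ *		return KDB_BADREG;
+ */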
+
+/*
+ * kdba_pt_regs
+ *
+ * Format a struct pt_regs
+ *
+ * Inputs:
+ * argc argument count
+ * argv argument vector
+ * Outputs:
+ * None.
+ * Returns:
+ * zero for success, a kdb diagnostic if error
+ * Locking:
+ * none.
+ * Remarks:
+ * If no address is supplied, it uses the last irq pt_regs.
+ */
+
+static int
+kdba_pt_regs(int argc, const char **argv)
+{
+ int diag;
+ kdb_machreg_t addr;
+ long offset = 0;
+ int nextarg;
+ struct pt_regs *p;
+ static const char *fmt = " %-11.11s 0x%lx\n";
+ static int first_time = 1;
+
+ if (argc == 0) {
+ addr = (kdb_machreg_t) get_irq_regs();
+ } else if (argc == 1) {
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
+ if (diag)
+ return diag;
+ } else {
+ return KDB_ARGCOUNT;
+ }
+
+ p = (struct pt_regs *) addr;
+ if (first_time) {
+ first_time = 0;
+ kdb_printf("\n+++ Warning: x86_64 pt_regs are not always "
+ "completely defined, r15-bx may be invalid\n\n");
+ }
+ kdb_printf("struct pt_regs 0x%p-0x%p\n", p, (unsigned char *)p + sizeof(*p) - 1);
+ kdb_print_nameval("r15", p->r15);
+ kdb_print_nameval("r14", p->r14);
+ kdb_print_nameval("r13", p->r13);
+ kdb_print_nameval("r12", p->r12);
+ kdb_print_nameval("bp", p->bp);
+ kdb_print_nameval("bx", p->bx);
+ kdb_print_nameval("r11", p->r11);
+ kdb_print_nameval("r10", p->r10);
+ kdb_print_nameval("r9", p->r9);
+ kdb_print_nameval("r8", p->r8);
+ kdb_print_nameval("ax", p->ax);
+ kdb_print_nameval("cx", p->cx);
+ kdb_print_nameval("dx", p->dx);
+ kdb_print_nameval("si", p->si);
+ kdb_print_nameval("di", p->di);
+ kdb_print_nameval("orig_ax", p->orig_ax);
+ kdb_print_nameval("ip", p->ip);
+ kdb_printf(fmt, "cs", p->cs);
+ kdb_printf(fmt, "flags", p->flags);
+ kdb_printf(fmt, "sp", p->sp);
+ kdb_printf(fmt, "ss", p->ss);
+ return 0;
+}
+#endif /* CONFIG_X86_32 */
+
+/*
+ * kdba_dumpregs
+ *
+ * Dump the specified register set to the display.
+ *
+ * Parameters:
+ * regs Pointer to structure containing registers.
+ * type Character string identifying register set to dump
+ * extra string further identifying register (optional)
+ * Outputs:
+ * Returns:
+ * 0 Success
+ * Locking:
+ * None.
+ * Remarks:
+ * This function will dump the general register set if the type
+ * argument is NULL (struct pt_regs). The alternate register
+ * set types supported by this function:
+ *
+ * d Debug registers
+ * c Control registers
+ * u User registers at most recent entry to kernel
+ * for the process currently selected with "pid" command.
+ * Following not yet implemented:
+ * r Memory Type Range Registers (extra defines register)
+ *
+ * MSR on i386/x86_64 are handled by rdmsr/wrmsr commands.
+ */
+
+int
+kdba_dumpregs(struct pt_regs *regs,
+ const char *type,
+ const char *extra)
+{
+ int i;
+ int count = 0;
+
+ if (type
+ && (type[0] == 'u')) {
+ type = NULL;
+ regs = (struct pt_regs *)
+ (kdb_current_task->thread.sp0 - sizeof(struct pt_regs));
+ }
+
+ if (type == NULL) {
+ struct kdbregs *rlp;
+ kdb_machreg_t contents;
+
+ if (!regs) {
+ kdb_printf("%s: pt_regs not available, use bt* or pid to select a different task\n", __FUNCTION__);
+ return KDB_BADREG;
+ }
+
+#ifdef CONFIG_X86_32
+ for (i=0, rlp=kdbreglist; i<nkdbreglist; i++,rlp++) {
+ kdb_printf("%s = ", rlp->reg_name);
+ kdba_getregcontents(rlp->reg_name, regs, &contents);
+ kdb_printf("0x%08lx ", contents);
+ if ((++count % 4) == 0)
+ kdb_printf("\n");
+ }
+#else
+ for (i=0, rlp=kdbreglist; i<nkdbreglist; i++,rlp++) {
+ kdb_printf("%8s = ", rlp->reg_name);
+ kdba_getregcontents(rlp->reg_name, regs, &contents);
+ kdb_printf("0x%016lx ", contents);
+ if ((++count % 2) == 0)
+ kdb_printf("\n");
+ }
+#endif
+
+ kdb_printf("&regs = 0x%p\n", regs);
+
+ return 0;
+ }
+
+ switch (type[0]) {
+ case 'd':
+ {
+ unsigned long dr[8];
+
+ for(i=0; i<8; i++) {
+ if ((i == 4) || (i == 5)) continue;
+ dr[i] = kdba_getdr(i);
+ }
+ kdb_printf("dr0 = 0x%08lx dr1 = 0x%08lx dr2 = 0x%08lx dr3 = 0x%08lx\n",
+ dr[0], dr[1], dr[2], dr[3]);
+ kdb_printf("dr6 = 0x%08lx dr7 = 0x%08lx\n",
+ dr[6], dr[7]);
+ return 0;
+ }
+ case 'c':
+ {
+ unsigned long cr[5];
+
+ for (i=0; i<5; i++) {
+ cr[i] = kdba_getcr(i);
+ }
+ kdb_printf("cr0 = 0x%08lx cr1 = 0x%08lx cr2 = 0x%08lx cr3 = 0x%08lx\ncr4 = 0x%08lx\n",
+ cr[0], cr[1], cr[2], cr[3], cr[4]);
+ return 0;
+ }
+ case 'r':
+ break;
+ default:
+ return KDB_BADREG;
+ }
+
+	/* reached only for type 'r', which is not yet implemented */
+ return 0;
+}
+EXPORT_SYMBOL(kdba_dumpregs);
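+
+/*
+ * For reference, and slightly simplified: kdb's "rd" command passes its
+ * optional argument straight through as the type string, e.g.
+ *
+ *	diag = kdba_dumpregs(kdb_current_regs,
+ *			     argc ? argv[1] : NULL, NULL);
+ *
+ * so "rd d" prints the debug registers and "rd c" the control registers.
+ */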
+
+kdb_machreg_t
+kdba_getpc(struct pt_regs *regs)
+{
+ return regs ? regs->ip : 0;
+}
+
+int
+kdba_setpc(struct pt_regs *regs, kdb_machreg_t newpc)
+{
+ if (KDB_NULL_REGS(regs))
+ return KDB_BADREG;
+ regs->ip = newpc;
+ KDB_STATE_SET(IP_ADJUSTED);
+ return 0;
+}
+
+/*
+ * kdba_main_loop
+ *
+ * Do any architecture specific set up before entering the main kdb loop.
+ * The primary function of this routine is to make all processes look the
+ * same to kdb.  kdb must be able to list a process without worrying
+ * whether it is running or blocked, so make all processes look as though
+ * they are blocked.
+ *
+ * Inputs:
+ * reason The reason KDB was invoked
+ * error The hardware-defined error code
+ * error2 kdb's current reason code. Initially error but can change
+ *		according to kdb state.
+ * db_result Result from break or debug point.
+ * regs The exception frame at time of fault/breakpoint. If reason
+ * is SILENT or CPU_UP then regs is NULL, otherwise it should
+ * always be valid.
+ * Returns:
+ *	0	KDB was invoked for an event for which it was not responsible
+ * 1 KDB handled the event for which it was invoked.
+ * Outputs:
+ *	Sets sp in current->thread (on x86_64).
+ * Locking:
+ * None.
+ * Remarks:
+ * none.
+ */
+
+int
+kdba_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error,
+ kdb_dbtrap_t db_result, struct pt_regs *regs)
+{
+ int ret;
+
+#ifdef CONFIG_X86_64
+ if (regs)
+ kdba_getregcontents("sp", regs, &(current->thread.sp));
+#endif
+ ret = kdb_save_running(regs, reason, reason2, error, db_result);
+ kdb_unsave_running(regs);
+ return ret;
+}
+
+void
+kdba_disableint(kdb_intstate_t *state)
+{
+ unsigned long *fp = (unsigned long *)state;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *fp = flags;
+}
+
+void
+kdba_restoreint(kdb_intstate_t *state)
+{
+ unsigned long flags = *(unsigned long *)state;
+ local_irq_restore(flags);
+}
+
+void
+kdba_setsinglestep(struct pt_regs *regs)
+{
+ if (KDB_NULL_REGS(regs))
+ return;
+ if (regs->flags & X86_EFLAGS_IF)
+ KDB_STATE_SET(A_IF);
+ else
+ KDB_STATE_CLEAR(A_IF);
+ regs->flags = (regs->flags | X86_EFLAGS_TF) & ~X86_EFLAGS_IF;
+}
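+
+/*
+ * Worked example: entering with flags = 0x246 (IF set) records A_IF and
+ * leaves flags = (0x246 | X86_EFLAGS_TF) & ~X86_EFLAGS_IF = 0x146, so
+ * the cpu traps after one instruction with interrupts masked;
+ * kdba_clearsinglestep() restores IF from the recorded state.
+ */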
+
+void
+kdba_clearsinglestep(struct pt_regs *regs)
+{
+ if (KDB_NULL_REGS(regs))
+ return;
+ if (KDB_STATE(A_IF))
+ regs->flags |= X86_EFLAGS_IF;
+ else
+ regs->flags &= ~X86_EFLAGS_IF;
+}
+
+#ifdef CONFIG_X86_32
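+/*
+ * kdb_jmp_buf layout as saved by the asm below: offsets 0/4/8/12/16/20
+ * hold ebx, esi, edi, ebp, the caller's esp and the return eip.  With
+ * CONFIG_FRAME_POINTER the argument sits one slot further up the stack
+ * because of the pushed %ebp.
+ */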
+int asmlinkage
+kdba_setjmp(kdb_jmp_buf *jb)
+{
+#ifdef CONFIG_FRAME_POINTER
+ __asm__ ("movl 8(%esp), %eax\n\t"
+ "movl %ebx, 0(%eax)\n\t"
+ "movl %esi, 4(%eax)\n\t"
+ "movl %edi, 8(%eax)\n\t"
+ "movl (%esp), %ecx\n\t"
+ "movl %ecx, 12(%eax)\n\t"
+ "leal 8(%esp), %ecx\n\t"
+ "movl %ecx, 16(%eax)\n\t"
+ "movl 4(%esp), %ecx\n\t"
+ "movl %ecx, 20(%eax)\n\t");
+#else /* CONFIG_FRAME_POINTER */
+ __asm__ ("movl 4(%esp), %eax\n\t"
+ "movl %ebx, 0(%eax)\n\t"
+ "movl %esi, 4(%eax)\n\t"
+ "movl %edi, 8(%eax)\n\t"
+ "movl %ebp, 12(%eax)\n\t"
+ "leal 4(%esp), %ecx\n\t"
+ "movl %ecx, 16(%eax)\n\t"
+ "movl 0(%esp), %ecx\n\t"
+ "movl %ecx, 20(%eax)\n\t");
+#endif /* CONFIG_FRAME_POINTER */
+ return 0;
+}
+
+void asmlinkage
+kdba_longjmp(kdb_jmp_buf *jb, int reason)
+{
+#ifdef CONFIG_FRAME_POINTER
+ __asm__("movl 8(%esp), %ecx\n\t"
+ "movl 12(%esp), %eax\n\t"
+ "movl 20(%ecx), %edx\n\t"
+ "movl 0(%ecx), %ebx\n\t"
+ "movl 4(%ecx), %esi\n\t"
+ "movl 8(%ecx), %edi\n\t"
+ "movl 12(%ecx), %ebp\n\t"
+ "movl 16(%ecx), %esp\n\t"
+ "jmp *%edx\n");
+#else /* CONFIG_FRAME_POINTER */
+ __asm__("movl 4(%esp), %ecx\n\t"
+ "movl 8(%esp), %eax\n\t"
+ "movl 20(%ecx), %edx\n\t"
+ "movl 0(%ecx), %ebx\n\t"
+ "movl 4(%ecx), %esi\n\t"
+ "movl 8(%ecx), %edi\n\t"
+ "movl 12(%ecx), %ebp\n\t"
+ "movl 16(%ecx), %esp\n\t"
+ "jmp *%edx\n");
+#endif /* CONFIG_FRAME_POINTER */
+}
+
+#else /* CONFIG_X86_32 */
+
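+/*
+ * 64-bit kdb_jmp_buf layout as saved below: slots 0-7 hold rbx, rbp,
+ * r12-r15, the caller's rsp and the return rip.  kdba_longjmp() reloads
+ * them, moves the reason code into %rax and jumps to the saved rip.
+ */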
+int asmlinkage
+kdba_setjmp(kdb_jmp_buf *jb)
+{
+#ifdef CONFIG_FRAME_POINTER
+ __asm__ __volatile__
+ ("movq %%rbx, (0*8)(%%rdi);"
+ "movq %%rcx, (1*8)(%%rdi);"
+ "movq %%r12, (2*8)(%%rdi);"
+ "movq %%r13, (3*8)(%%rdi);"
+ "movq %%r14, (4*8)(%%rdi);"
+ "movq %%r15, (5*8)(%%rdi);"
+ "leaq 16(%%rsp), %%rdx;"
+ "movq %%rdx, (6*8)(%%rdi);"
+ "movq %%rax, (7*8)(%%rdi)"
+ :
+ : "a" (__builtin_return_address(0)),
+ "c" (__builtin_frame_address(1))
+ );
+#else /* !CONFIG_FRAME_POINTER */
+ __asm__ __volatile__
+ ("movq %%rbx, (0*8)(%%rdi);"
+ "movq %%rbp, (1*8)(%%rdi);"
+ "movq %%r12, (2*8)(%%rdi);"
+ "movq %%r13, (3*8)(%%rdi);"
+ "movq %%r14, (4*8)(%%rdi);"
+ "movq %%r15, (5*8)(%%rdi);"
+ "leaq 8(%%rsp), %%rdx;"
+ "movq %%rdx, (6*8)(%%rdi);"
+ "movq %%rax, (7*8)(%%rdi)"
+ :
+ : "a" (__builtin_return_address(0))
+ );
+#endif /* CONFIG_FRAME_POINTER */
+ return 0;
+}
+
+void asmlinkage
+kdba_longjmp(kdb_jmp_buf *jb, int reason)
+{
+ __asm__("movq (0*8)(%rdi),%rbx;"
+ "movq (1*8)(%rdi),%rbp;"
+ "movq (2*8)(%rdi),%r12;"
+ "movq (3*8)(%rdi),%r13;"
+ "movq (4*8)(%rdi),%r14;"
+ "movq (5*8)(%rdi),%r15;"
+ "movq (7*8)(%rdi),%rdx;"
+ "movq (6*8)(%rdi),%rsp;"
+ "mov %rsi, %rax;"
+ "jmpq *%rdx");
+}
+#endif /* CONFIG_X86_32 */
+
+#ifdef CONFIG_X86_32
+/*
+ * kdba_stackdepth
+ *
+ * Print processes that are using more than a specific percentage of their
+ * stack.
+ *
+ * Inputs:
+ * argc argument count
+ * argv argument vector
+ * Outputs:
+ * None.
+ * Returns:
+ * zero for success, a kdb diagnostic if error
+ * Locking:
+ * none.
+ * Remarks:
+ * If no percentage is supplied, it uses 60.
+ */
+
+static void
+kdba_stackdepth1(struct task_struct *p, unsigned long sp)
+{
+ struct thread_info *tinfo;
+ int used;
+ const char *type;
+ kdb_ps1(p);
+ do {
+ tinfo = (struct thread_info *)(sp & -THREAD_SIZE);
+ used = sizeof(*tinfo) + THREAD_SIZE - (sp & (THREAD_SIZE-1));
+ type = NULL;
+ if (kdb_task_has_cpu(p)) {
+ struct kdb_activation_record ar;
+ memset(&ar, 0, sizeof(ar));
+ kdba_get_stack_info_alternate(sp, -1, &ar);
+ type = ar.stack.id;
+ }
+ if (!type)
+ type = "process";
+ kdb_printf(" %s stack %p sp %lx used %d\n", type, tinfo, sp, used);
+ sp = tinfo->previous_esp;
+ } while (sp);
+}
+
+static int
+kdba_stackdepth(int argc, const char **argv)
+{
+ int diag, cpu, threshold, used, over;
+ unsigned long percentage;
+ unsigned long esp;
+ long offset = 0;
+ int nextarg;
+ struct task_struct *p, *g;
+ struct kdb_running_process *krp;
+ struct thread_info *tinfo;
+
+ if (argc == 0) {
+ percentage = 60;
+ } else if (argc == 1) {
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &percentage, &offset, NULL);
+ if (diag)
+ return diag;
+ } else {
+ return KDB_ARGCOUNT;
+ }
+ percentage = max_t(int, percentage, 1);
+ percentage = min_t(int, percentage, 100);
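+	/* round THREAD_SIZE * percentage / 100 to the nearest byte, e.g.
+	 * THREAD_SIZE = 8192 with the default 60% gives 4915 bytes */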
+ threshold = ((2 * THREAD_SIZE * percentage) / 100 + 1) >> 1;
+ kdb_printf("stackdepth: processes using more than %ld%% (%d bytes) of stack\n",
+ percentage, threshold);
+
+	/* Run the active tasks first; they can have multiple stacks */
+ for (cpu = 0, krp = kdb_running_process; cpu < NR_CPUS; ++cpu, ++krp) {
+ if (!cpu_online(cpu))
+ continue;
+ p = krp->p;
+ esp = krp->arch.sp;
+ over = 0;
+ do {
+ tinfo = (struct thread_info *)(esp & -THREAD_SIZE);
+ used = sizeof(*tinfo) + THREAD_SIZE - (esp & (THREAD_SIZE-1));
+ if (used >= threshold)
+ over = 1;
+ esp = tinfo->previous_esp;
+ } while (esp);
+ if (over)
+ kdba_stackdepth1(p, krp->arch.sp);
+ }
+ /* Now the tasks that are not on cpus */
+ kdb_do_each_thread(g, p) {
+ if (kdb_task_has_cpu(p))
+ continue;
+ esp = p->thread.sp;
+ used = sizeof(*tinfo) + THREAD_SIZE - (esp & (THREAD_SIZE-1));
+ over = used >= threshold;
+ if (over)
+ kdba_stackdepth1(p, esp);
+ } kdb_while_each_thread(g, p);
+
+ return 0;
+}
+#else /* CONFIG_X86_32 */
+
+
+/*
+ * kdba_entry
+ *
+ * This is the interface routine between
+ * the notifier die_chain and kdb
+ */
+static int kdba_entry(struct notifier_block *b, unsigned long val, void *v)
+{
+ struct die_args *args = v;
+ int err, trap, ret = 0;
+ struct pt_regs *regs;
+
+ regs = args->regs;
+ err = args->err;
+ trap = args->trapnr;
+ switch (val){
+#ifdef CONFIG_SMP
+ case DIE_NMI_IPI:
+ ret = kdb_ipi(regs, NULL);
+ break;
+#endif /* CONFIG_SMP */
+ case DIE_OOPS:
+ ret = kdb(KDB_REASON_OOPS, err, regs);
+ break;
+ case DIE_CALL:
+ ret = kdb(KDB_REASON_ENTER, err, regs);
+ break;
+ case DIE_DEBUG:
+ ret = kdb(KDB_REASON_DEBUG, err, regs);
+ break;
+ case DIE_NMIWATCHDOG:
+ ret = kdb(KDB_REASON_NMI, err, regs);
+ break;
+ case DIE_INT3:
+ ret = kdb(KDB_REASON_BREAK, err, regs);
+		/* fall through */
+ default:
+ break;
+ }
+ return (ret ? NOTIFY_STOP : NOTIFY_DONE);
+}
+
+/*
+ * notifier block for kdb entry
+ */
+static struct notifier_block kdba_notifier = {
+ .notifier_call = kdba_entry
+};
+#endif /* CONFIG_X86_32 */
+
+asmlinkage int kdb_call(void);
+
+/* Executed once on each cpu at startup. */
+void
+kdba_cpu_up(void)
+{
+}
+
+static int __init
+kdba_arch_init(void)
+{
+ set_intr_gate(KDBENTER_VECTOR, kdb_call);
+ return 0;
+}
+
+arch_initcall(kdba_arch_init);
+
+/*
+ * kdba_init
+ *
+ * Architecture specific initialization.
+ *
+ * Parameters:
+ * None.
+ * Returns:
+ * None.
+ * Locking:
+ * None.
+ * Remarks:
+ * None.
+ */
+
+void __init
+kdba_init(void)
+{
+ kdba_arch_init(); /* Need to register KDBENTER_VECTOR early */
+ kdb_register("pt_regs", kdba_pt_regs, "address", "Format struct pt_regs", 0);
+#ifdef CONFIG_X86_32
+ kdb_register("stackdepth", kdba_stackdepth, "[percentage]", "Print processes using >= stack percentage", 0);
+#else
+ register_die_notifier(&kdba_notifier);
+#endif
+ return;
+}
+
+/*
+ * kdba_adjust_ip
+ *
+ * Architecture specific adjustment of instruction pointer before leaving
+ * kdb.
+ *
+ * Parameters:
+ * reason The reason KDB was invoked
+ * error The hardware-defined error code
+ * regs The exception frame at time of fault/breakpoint. If reason
+ * is SILENT or CPU_UP then regs is NULL, otherwise it should
+ * always be valid.
+ * Returns:
+ * None.
+ * Locking:
+ * None.
+ * Remarks:
+ * noop on ix86.
+ */
+
+void
+kdba_adjust_ip(kdb_reason_t reason, int error, struct pt_regs *regs)
+{
+ return;
+}
+
+void
+kdba_set_current_task(const struct task_struct *p)
+{
+ kdb_current_task = p;
+ if (kdb_task_has_cpu(p)) {
+ struct kdb_running_process *krp = kdb_running_process + kdb_process_cpu(p);
+ kdb_current_regs = krp->regs;
+ return;
+ }
+ kdb_current_regs = NULL;
+}
+
+#ifdef CONFIG_X86_32
+/*
+ * asm-i386 uaccess.h supplies __copy_to_user which relies on MMU to
+ * trap invalid addresses in the _xxx fields.  Verify that the other
+ * address of the pair is valid by accessing its first and last byte
+ * ourselves; any access violations should then only be caused by the
+ * _xxx addresses.
+ */
+
+int
+kdba_putarea_size(unsigned long to_xxx, void *from, size_t size)
+{
+ mm_segment_t oldfs = get_fs();
+ int r;
+ char c;
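+	/* touch the first and last source byte ourselves so that a fault
+	 * here is clearly on 'from', not on the unchecked to_xxx address */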
+ c = *((volatile char *)from);
+ c = *((volatile char *)from + size - 1);
+
+ if (to_xxx < PAGE_OFFSET) {
+ return kdb_putuserarea_size(to_xxx, from, size);
+ }
+
+ set_fs(KERNEL_DS);
+ r = __copy_to_user_inatomic((void __user *)to_xxx, from, size);
+ set_fs(oldfs);
+ return r;
+}
+
+int
+kdba_getarea_size(void *to, unsigned long from_xxx, size_t size)
+{
+ mm_segment_t oldfs = get_fs();
+ int r;
+ *((volatile char *)to) = '\0';
+ *((volatile char *)to + size - 1) = '\0';
+
+ if (from_xxx < PAGE_OFFSET) {
+ return kdb_getuserarea_size(to, from_xxx, size);
+ }
+
+ set_fs(KERNEL_DS);
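+	/* the constant-size cases below let __copy_to_user_inatomic pick
+	 * its __builtin_constant_p fast paths for 1/2/4/8 byte copies */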
+ switch (size) {
+ case 1:
+ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, 1);
+ break;
+ case 2:
+ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, 2);
+ break;
+ case 4:
+ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, 4);
+ break;
+ case 8:
+ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, 8);
+ break;
+ default:
+ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, size);
+ break;
+ }
+ set_fs(oldfs);
+ return r;
+}
+
+int
+kdba_verify_rw(unsigned long addr, size_t size)
+{
+ unsigned char data[size];
+ return(kdba_getarea_size(data, addr, size) || kdba_putarea_size(addr, data, size));
+}
+#endif /* CONFIG_X86_32 */
+
+#ifdef CONFIG_SMP
+
+#include <asm/ipi.h>
+
+gate_desc save_idt[NR_VECTORS];
+
+void kdba_takeover_vector(int vector)
+{
+ memcpy(&save_idt[vector], &idt_table[vector], sizeof(gate_desc));
+	set_intr_gate(vector, kdb_interrupt);
+ return;
+}
+
+void kdba_giveback_vector(int vector)
+{
+ native_write_idt_entry(idt_table, vector, &save_idt[vector]);
+ return;
+}
+
+/* When first entering KDB, try a normal IPI. That reduces backtrace problems
+ * on the other cpus.
+ */
+void
+smp_kdb_stop(void)
+{
+ if (!KDB_FLAG(NOIPI)) {
+ kdba_takeover_vector(KDB_VECTOR);
+ apic->send_IPI_allbutself(KDB_VECTOR);
+ }
+}
+
+/* The normal KDB IPI handler */
+#ifdef CONFIG_X86_64
+asmlinkage
+#endif
+void
+smp_kdb_interrupt(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ ack_APIC_irq();
+ irq_enter();
+ kdb_ipi(regs, NULL);
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+
+/* Invoked once from kdb_wait_for_cpus when waiting for cpus. For those cpus
+ * that have not responded to the normal KDB interrupt yet, hit them with an
+ * NMI event.
+ */
+void
+kdba_wait_for_cpus(void)
+{
+ int c;
+ if (KDB_FLAG(CATASTROPHIC))
+ return;
+ kdb_printf(" Sending NMI to non-responding cpus: ");
+ for_each_online_cpu(c) {
+ if (kdb_running_process[c].seqno < kdb_seqno - 1) {
+ kdb_printf(" %d", c);
+ apic->send_IPI_mask(cpumask_of(c), NMI_VECTOR);
+ }
+ }
+ kdb_printf(".\n");
+}
+
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_KDB_KDUMP
+void kdba_kdump_prepare(struct pt_regs *regs)
+{
+ int i;
+ struct pt_regs r;
+	if (regs == NULL) {
+		/* zero the local frame rather than pass stack garbage on */
+		memset(&r, 0, sizeof(r));
+		regs = &r;
+	}
+
+ for (i = 1; i < NR_CPUS; ++i) {
+ if (!cpu_online(i))
+ continue;
+
+ KDB_STATE_SET_CPU(KEXEC, i);
+ }
+
+ machine_crash_shutdown(regs);
+}
+
+extern void halt_current_cpu(struct pt_regs *);
+
+void kdba_kdump_shutdown_slave(struct pt_regs *regs)
+{
+#ifndef CONFIG_XEN
+ halt_current_cpu(regs);
+#endif /* CONFIG_XEN */
+}
+
+#endif /* CONFIG_KDB_KDUMP */
--- /dev/null
+++ b/arch/x86/kdb/pc_keyb.h
@@ -0,0 +1,137 @@
+/*
+ * include/linux/pc_keyb.h
+ *
+ * PC Keyboard And Keyboard Controller
+ *
+ * (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ */
+
+/*
+ * Configuration Switches
+ */
+
+#undef KBD_REPORT_ERR /* Report keyboard errors */
+#define KBD_REPORT_UNKN /* Report unknown scan codes */
+#define KBD_REPORT_TIMEOUTS /* Report keyboard timeouts */
+#undef KBD_IS_FOCUS_9000 /* We have the brain-damaged FOCUS-9000 keyboard */
+#undef INITIALIZE_MOUSE /* Define if your PS/2 mouse needs initialization. */
+
+
+
+#define KBD_INIT_TIMEOUT 1000 /* Timeout in ms for initializing the keyboard */
+#define KBC_TIMEOUT 250 /* Timeout in ms for sending to keyboard controller */
+#define KBD_TIMEOUT 1000 /* Timeout in ms for keyboard command acknowledge */
+
+/*
+ * Internal variables of the driver
+ */
+
+extern unsigned char pckbd_read_mask;
+extern unsigned char aux_device_present;
+
+/*
+ * Keyboard Controller Registers on normal PCs.
+ */
+
+#define KBD_STATUS_REG 0x64 /* Status register (R) */
+#define KBD_CNTL_REG 0x64 /* Controller command register (W) */
+#define KBD_DATA_REG 0x60 /* Keyboard data register (R/W) */
+
+/*
+ * Keyboard Controller Commands
+ */
+
+#define KBD_CCMD_READ_MODE 0x20 /* Read mode bits */
+#define KBD_CCMD_WRITE_MODE 0x60 /* Write mode bits */
+#define KBD_CCMD_GET_VERSION 0xA1 /* Get controller version */
+#define KBD_CCMD_MOUSE_DISABLE 0xA7 /* Disable mouse interface */
+#define KBD_CCMD_MOUSE_ENABLE 0xA8 /* Enable mouse interface */
+#define KBD_CCMD_TEST_MOUSE 0xA9 /* Mouse interface test */
+#define KBD_CCMD_SELF_TEST 0xAA /* Controller self test */
+#define KBD_CCMD_KBD_TEST 0xAB /* Keyboard interface test */
+#define KBD_CCMD_KBD_DISABLE 0xAD /* Keyboard interface disable */
+#define KBD_CCMD_KBD_ENABLE 0xAE /* Keyboard interface enable */
+#define KBD_CCMD_WRITE_AUX_OBUF 0xD3 /* Write to output buffer as if
+ initiated by the auxiliary device */
+#define KBD_CCMD_WRITE_MOUSE 0xD4 /* Write the following byte to the mouse */
+
+/*
+ * Keyboard Commands
+ */
+
+#define KBD_CMD_SET_LEDS 0xED /* Set keyboard leds */
+#define KBD_CMD_SET_RATE 0xF3 /* Set typematic rate */
+#define KBD_CMD_ENABLE 0xF4 /* Enable scanning */
+#define KBD_CMD_DISABLE 0xF5 /* Disable scanning */
+#define KBD_CMD_RESET 0xFF /* Reset */
+
+/*
+ * Keyboard Replies
+ */
+
+#define KBD_REPLY_POR 0xAA /* Power on reset */
+#define KBD_REPLY_ACK 0xFA /* Command ACK */
+#define KBD_REPLY_RESEND 0xFE /* Command NACK, send the cmd again */
+
+/*
+ * Status Register Bits
+ */
+
+#define KBD_STAT_OBF 0x01 /* Keyboard output buffer full */
+#define KBD_STAT_IBF 0x02 /* Keyboard input buffer full */
+#define KBD_STAT_SELFTEST 0x04 /* Self test successful */
+#define KBD_STAT_CMD 0x08 /* Last write was a command write (0=data) */
+#define KBD_STAT_UNLOCKED 0x10 /* Zero if keyboard locked */
+#define KBD_STAT_MOUSE_OBF 0x20 /* Mouse output buffer full */
+#define KBD_STAT_GTO 0x40 /* General receive/xmit timeout */
+#define KBD_STAT_PERR 0x80 /* Parity error */
+
+#define AUX_STAT_OBF (KBD_STAT_OBF | KBD_STAT_MOUSE_OBF)
+
+/*
+ * Controller Mode Register Bits
+ */
+
+#define KBD_MODE_KBD_INT 0x01 /* Keyboard data generate IRQ1 */
+#define KBD_MODE_MOUSE_INT 0x02 /* Mouse data generate IRQ12 */
+#define KBD_MODE_SYS 0x04 /* The system flag (?) */
+#define KBD_MODE_NO_KEYLOCK 0x08 /* The keylock doesn't affect the keyboard if set */
+#define KBD_MODE_DISABLE_KBD 0x10 /* Disable keyboard interface */
+#define KBD_MODE_DISABLE_MOUSE 0x20 /* Disable mouse interface */
+#define KBD_MODE_KCC 0x40 /* Scan code conversion to PC format */
+#define KBD_MODE_RFU 0x80
+
+/*
+ * Mouse Commands
+ */
+
+#define AUX_SET_RES 0xE8 /* Set resolution */
+#define AUX_SET_SCALE11 0xE6 /* Set 1:1 scaling */
+#define AUX_SET_SCALE21 0xE7 /* Set 2:1 scaling */
+#define AUX_GET_SCALE 0xE9 /* Get scaling factor */
+#define AUX_SET_STREAM 0xEA /* Set stream mode */
+#define AUX_SET_SAMPLE 0xF3 /* Set sample rate */
+#define AUX_ENABLE_DEV 0xF4 /* Enable aux device */
+#define AUX_DISABLE_DEV 0xF5 /* Disable aux device */
+#define AUX_RESET 0xFF /* Reset aux device */
+#define AUX_ACK 0xFA /* Command byte ACK. */
+
+#define AUX_BUF_SIZE 2048 /* This might be better divisible by
+ three to make overruns stay in sync
+ but then the read function would need
+ a lock etc - ick */
+
+struct aux_queue {
+ unsigned long head;
+ unsigned long tail;
+ wait_queue_head_t proc_list;
+ struct fasync_struct *fasync;
+ unsigned char buf[AUX_BUF_SIZE];
+};
+
+
+/* How to access the keyboard macros on this platform. */
+#define kbd_read_input() inb(KBD_DATA_REG)
+#define kbd_read_status() inb(KBD_STATUS_REG)
+#define kbd_write_output(val) outb(val, KBD_DATA_REG)
+#define kbd_write_command(val) outb(val, KBD_CNTL_REG)
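+
+/*
+ * Illustrative sketch, not used by this patch: a minimal polled read
+ * built from the macros above - spin until the controller reports a
+ * full output buffer, then fetch the byte.
+ *
+ *	static inline int kbd_poll_byte(void)
+ *	{
+ *		while (!(kbd_read_status() & KBD_STAT_OBF))
+ *			cpu_relax();
+ *		return kbd_read_input();
+ *	}
+ */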
--- /dev/null
+++ b/arch/x86/kdb/x86-dis.c
@@ -0,0 +1,4688 @@
+/* Print i386 instructions for GDB, the GNU debugger.
+ Copyright 1988, 1989, 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+
+ This file is part of GDB.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use.
+ * Run through col -b to remove trailing whitespace; various #ifdef/#ifndef
+ * __KERNEL__ guards added.
+ * Keith Owens <kaos@sgi.com> 15 May 2006
+ */
+
+/* 80386 instruction printer by Pace Willisson (pace@prep.ai.mit.edu)
+ July 1988
+ modified by John Hassey (hassey@dg-rtp.dg.com)
+ x86-64 support added by Jan Hubicka (jh@suse.cz)
+ VIA PadLock support by Michal Ludvig (mludvig@suse.cz). */
+
+/* The main tables describing the instructions are essentially a copy
+   of the "Opcode Map" chapter (Appendix A) of the Intel 80386
+   Programmers Manual.  Usually, there is a capital letter, followed
+   by a small letter.  The capital letter tells the addressing mode,
+   and the small letter tells the operand size.  Refer to
+   the Intel manual for details.  */
+
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/dis-asm.h>
+#include <linux/kdb.h>
+#define abort() BUG()
+#else /* __KERNEL__ */
+#include "dis-asm.h"
+#include "sysdep.h"
+#include "opintl.h"
+#endif /* __KERNEL__ */
+
+#define MAXLEN 20
+
+#ifndef __KERNEL__
+#include <setjmp.h>
+#endif /* __KERNEL__ */
+
+#ifndef UNIXWARE_COMPAT
+/* Set non-zero for broken, compatible instructions. Set to zero for
+ non-broken opcodes. */
+#define UNIXWARE_COMPAT 1
+#endif
+
+static int fetch_data (struct disassemble_info *, bfd_byte *);
+static void ckprefix (void);
+static const char *prefix_name (int, int);
+static int print_insn (bfd_vma, disassemble_info *);
+static void dofloat (int);
+static void OP_ST (int, int);
+static void OP_STi (int, int);
+static int putop (const char *, int);
+static void oappend (const char *);
+static void append_seg (void);
+static void OP_indirE (int, int);
+static void print_operand_value (char *, int, bfd_vma);
+static void OP_E (int, int);
+static void OP_G (int, int);
+static bfd_vma get64 (void);
+static bfd_signed_vma get32 (void);
+static bfd_signed_vma get32s (void);
+static int get16 (void);
+static void set_op (bfd_vma, int);
+static void OP_REG (int, int);
+static void OP_IMREG (int, int);
+static void OP_I (int, int);
+static void OP_I64 (int, int);
+static void OP_sI (int, int);
+static void OP_J (int, int);
+static void OP_SEG (int, int);
+static void OP_DIR (int, int);
+static void OP_OFF (int, int);
+static void OP_OFF64 (int, int);
+static void ptr_reg (int, int);
+static void OP_ESreg (int, int);
+static void OP_DSreg (int, int);
+static void OP_C (int, int);
+static void OP_D (int, int);
+static void OP_T (int, int);
+static void OP_Rd (int, int);
+static void OP_MMX (int, int);
+static void OP_XMM (int, int);
+static void OP_EM (int, int);
+static void OP_EX (int, int);
+static void OP_MS (int, int);
+static void OP_XS (int, int);
+static void OP_M (int, int);
+static void OP_VMX (int, int);
+static void OP_0fae (int, int);
+static void OP_0f07 (int, int);
+static void NOP_Fixup (int, int);
+static void OP_3DNowSuffix (int, int);
+static void OP_SIMD_Suffix (int, int);
+static void SIMD_Fixup (int, int);
+static void PNI_Fixup (int, int);
+static void SVME_Fixup (int, int);
+static void INVLPG_Fixup (int, int);
+static void BadOp (void);
+static void SEG_Fixup (int, int);
+static void VMX_Fixup (int, int);
+
+struct dis_private {
+ /* Points to first byte not fetched. */
+ bfd_byte *max_fetched;
+ bfd_byte the_buffer[MAXLEN];
+ bfd_vma insn_start;
+ int orig_sizeflag;
+#ifndef __KERNEL__
+ jmp_buf bailout;
+#endif /* __KERNEL__ */
+};
+
+/* The opcode for the fwait instruction, which we treat as a prefix
+ when we can. */
+#define FWAIT_OPCODE (0x9b)
+
+/* Set to 1 for 64bit mode disassembly. */
+static int mode_64bit;
+
+/* Flags for the prefixes for the current instruction. See below. */
+static int prefixes;
+
+/* REX prefix the current instruction. See below. */
+static int rex;
+/* Bits of REX we've already used. */
+static int rex_used;
+#define REX_MODE64 8
+#define REX_EXTX 4
+#define REX_EXTY 2
+#define REX_EXTZ 1
+/* Mark parts used in the REX prefix.  When testing for an empty
+   prefix (for the 8bit register REX extension), just mask it out.
+   Otherwise, record a REX bit as used only when that bit is actually
+   set in the prefix.  */
+#define USED_REX(value) \
+ { \
+ if (value) \
+ rex_used |= (rex & value) ? (value) | 0x40 : 0; \
+ else \
+ rex_used |= 0x40; \
+ }
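+
+/* Example (assuming rex holds the raw prefix byte): after a 0x48
+   prefix (REX.W), USED_REX (REX_MODE64) sets 0x48 in rex_used - the
+   W bit plus the 0x40 base of the prefix.  */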
+
+/* Flags for prefixes which we somehow handled when printing the
+ current instruction. */
+static int used_prefixes;
+
+/* Flags stored in PREFIXES. */
+#define PREFIX_REPZ 1
+#define PREFIX_REPNZ 2
+#define PREFIX_LOCK 4
+#define PREFIX_CS 8
+#define PREFIX_SS 0x10
+#define PREFIX_DS 0x20
+#define PREFIX_ES 0x40
+#define PREFIX_FS 0x80
+#define PREFIX_GS 0x100
+#define PREFIX_DATA 0x200
+#define PREFIX_ADDR 0x400
+#define PREFIX_FWAIT 0x800
+
+/* Make sure that bytes from INFO->PRIVATE_DATA->BUFFER (inclusive)
+ to ADDR (exclusive) are valid. Returns 1 for success, longjmps
+ on error. */
+#define FETCH_DATA(info, addr) \
+ ((addr) <= ((struct dis_private *) (info->private_data))->max_fetched \
+ ? 1 : fetch_data ((info), (addr)))
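+
+/* Typical use, for reference: callers do FETCH_DATA (info, codep + 1)
+   to guarantee one more byte is buffered before dereferencing *codep.  */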
+
+static int
+fetch_data (struct disassemble_info *info, bfd_byte *addr)
+{
+ int status;
+ struct dis_private *priv = (struct dis_private *) info->private_data;
+ bfd_vma start = priv->insn_start + (priv->max_fetched - priv->the_buffer);
+
+ status = (*info->read_memory_func) (start,
+ priv->max_fetched,
+ addr - priv->max_fetched,
+ info);
+ if (status != 0)
+ {
+ /* If we did manage to read at least one byte, then
+ print_insn_i386 will do something sensible. Otherwise, print
+ an error. We do that here because this is where we know
+ STATUS. */
+ if (priv->max_fetched == priv->the_buffer)
+ (*info->memory_error_func) (status, start, info);
+#ifndef __KERNEL__
+ longjmp (priv->bailout, 1);
+#else /* __KERNEL__ */
+ /* XXX - what to do? */
+ kdb_printf("Hmm. longjmp.\n");
+#endif /* __KERNEL__ */
+ }
+ else
+ priv->max_fetched = addr;
+ return 1;
+}
+
+#define XX NULL, 0
+
+#define Eb OP_E, b_mode
+#define Ev OP_E, v_mode
+#define Ed OP_E, d_mode
+#define Eq OP_E, q_mode
+#define Edq OP_E, dq_mode
+#define Edqw OP_E, dqw_mode
+#define indirEv OP_indirE, branch_v_mode
+#define indirEp OP_indirE, f_mode
+#define Em OP_E, m_mode
+#define Ew OP_E, w_mode
+#define Ma OP_E, v_mode
+#define M OP_M, 0 /* lea, lgdt, etc. */
+#define Mp OP_M, f_mode /* 32 or 48 bit memory operand for LDS, LES etc */
+#define Gb OP_G, b_mode
+#define Gv OP_G, v_mode
+#define Gd OP_G, d_mode
+#define Gdq OP_G, dq_mode
+#define Gm OP_G, m_mode
+#define Gw OP_G, w_mode
+#define Rd OP_Rd, d_mode
+#define Rm OP_Rd, m_mode
+#define Ib OP_I, b_mode
+#define sIb OP_sI, b_mode /* sign extended byte */
+#define Iv OP_I, v_mode
+#define Iq OP_I, q_mode
+#define Iv64 OP_I64, v_mode
+#define Iw OP_I, w_mode
+#define I1 OP_I, const_1_mode
+#define Jb OP_J, b_mode
+#define Jv OP_J, v_mode
+#define Cm OP_C, m_mode
+#define Dm OP_D, m_mode
+#define Td OP_T, d_mode
+#define Sv SEG_Fixup, v_mode
+
+#define RMeAX OP_REG, eAX_reg
+#define RMeBX OP_REG, eBX_reg
+#define RMeCX OP_REG, eCX_reg
+#define RMeDX OP_REG, eDX_reg
+#define RMeSP OP_REG, eSP_reg
+#define RMeBP OP_REG, eBP_reg
+#define RMeSI OP_REG, eSI_reg
+#define RMeDI OP_REG, eDI_reg
+#define RMrAX OP_REG, rAX_reg
+#define RMrBX OP_REG, rBX_reg
+#define RMrCX OP_REG, rCX_reg
+#define RMrDX OP_REG, rDX_reg
+#define RMrSP OP_REG, rSP_reg
+#define RMrBP OP_REG, rBP_reg
+#define RMrSI OP_REG, rSI_reg
+#define RMrDI OP_REG, rDI_reg
+#define RMAL OP_REG, al_reg
+#define RMCL OP_REG, cl_reg
+#define RMDL OP_REG, dl_reg
+#define RMBL OP_REG, bl_reg
+#define RMAH OP_REG, ah_reg
+#define RMCH OP_REG, ch_reg
+#define RMDH OP_REG, dh_reg
+#define RMBH OP_REG, bh_reg
+#define RMAX OP_REG, ax_reg
+#define RMDX OP_REG, dx_reg
+
+#define eAX OP_IMREG, eAX_reg
+#define eBX OP_IMREG, eBX_reg
+#define eCX OP_IMREG, eCX_reg
+#define eDX OP_IMREG, eDX_reg
+#define eSP OP_IMREG, eSP_reg
+#define eBP OP_IMREG, eBP_reg
+#define eSI OP_IMREG, eSI_reg
+#define eDI OP_IMREG, eDI_reg
+#define AL OP_IMREG, al_reg
+#define CL OP_IMREG, cl_reg
+#define DL OP_IMREG, dl_reg
+#define BL OP_IMREG, bl_reg
+#define AH OP_IMREG, ah_reg
+#define CH OP_IMREG, ch_reg
+#define DH OP_IMREG, dh_reg
+#define BH OP_IMREG, bh_reg
+#define AX OP_IMREG, ax_reg
+#define DX OP_IMREG, dx_reg
+#define indirDX OP_IMREG, indir_dx_reg
+
+#define Sw OP_SEG, w_mode
+#define Ap OP_DIR, 0
+#define Ob OP_OFF, b_mode
+#define Ob64 OP_OFF64, b_mode
+#define Ov OP_OFF, v_mode
+#define Ov64 OP_OFF64, v_mode
+#define Xb OP_DSreg, eSI_reg
+#define Xv OP_DSreg, eSI_reg
+#define Yb OP_ESreg, eDI_reg
+#define Yv OP_ESreg, eDI_reg
+#define DSBX OP_DSreg, eBX_reg
+
+#define es OP_REG, es_reg
+#define ss OP_REG, ss_reg
+#define cs OP_REG, cs_reg
+#define ds OP_REG, ds_reg
+#define fs OP_REG, fs_reg
+#define gs OP_REG, gs_reg
+
+#define MX OP_MMX, 0
+#define XM OP_XMM, 0
+#define EM OP_EM, v_mode
+#define EX OP_EX, v_mode
+#define MS OP_MS, v_mode
+#define XS OP_XS, v_mode
+#define VM OP_VMX, q_mode
+#define OPSUF OP_3DNowSuffix, 0
+#define OPSIMD OP_SIMD_Suffix, 0
+
+#define cond_jump_flag NULL, cond_jump_mode
+#define loop_jcxz_flag NULL, loop_jcxz_mode
+
+/* bits in sizeflag */
+#define SUFFIX_ALWAYS 4
+#define AFLAG 2
+#define DFLAG 1
+
+#define b_mode 1 /* byte operand */
+#define v_mode 2 /* operand size depends on prefixes */
+#define w_mode 3 /* word operand */
+#define d_mode 4 /* double word operand */
+#define q_mode 5 /* quad word operand */
+#define t_mode 6 /* ten-byte operand */
+#define x_mode 7 /* 16-byte XMM operand */
+#define m_mode 8 /* d_mode in 32bit, q_mode in 64bit mode. */
+#define cond_jump_mode 9
+#define loop_jcxz_mode 10
+#define dq_mode 11 /* operand size depends on REX prefixes. */
+#define dqw_mode 12 /* registers like dq_mode, memory like w_mode. */
+#define f_mode 13 /* 4- or 6-byte pointer operand */
+#define const_1_mode 14
+#define branch_v_mode 15 /* v_mode for branch. */
+
+#define es_reg 100
+#define cs_reg 101
+#define ss_reg 102
+#define ds_reg 103
+#define fs_reg 104
+#define gs_reg 105
+
+#define eAX_reg 108
+#define eCX_reg 109
+#define eDX_reg 110
+#define eBX_reg 111
+#define eSP_reg 112
+#define eBP_reg 113
+#define eSI_reg 114
+#define eDI_reg 115
+
+#define al_reg 116
+#define cl_reg 117
+#define dl_reg 118
+#define bl_reg 119
+#define ah_reg 120
+#define ch_reg 121
+#define dh_reg 122
+#define bh_reg 123
+
+#define ax_reg 124
+#define cx_reg 125
+#define dx_reg 126
+#define bx_reg 127
+#define sp_reg 128
+#define bp_reg 129
+#define si_reg 130
+#define di_reg 131
+
+#define rAX_reg 132
+#define rCX_reg 133
+#define rDX_reg 134
+#define rBX_reg 135
+#define rSP_reg 136
+#define rBP_reg 137
+#define rSI_reg 138
+#define rDI_reg 139
+
+#define indir_dx_reg 150
+
+#define FLOATCODE 1
+#define USE_GROUPS 2
+#define USE_PREFIX_USER_TABLE 3
+#define X86_64_SPECIAL 4
+
+#define FLOAT NULL, NULL, FLOATCODE, NULL, 0, NULL, 0
+
+#define GRP1b NULL, NULL, USE_GROUPS, NULL, 0, NULL, 0
+#define GRP1S NULL, NULL, USE_GROUPS, NULL, 1, NULL, 0
+#define GRP1Ss NULL, NULL, USE_GROUPS, NULL, 2, NULL, 0
+#define GRP2b NULL, NULL, USE_GROUPS, NULL, 3, NULL, 0
+#define GRP2S NULL, NULL, USE_GROUPS, NULL, 4, NULL, 0
+#define GRP2b_one NULL, NULL, USE_GROUPS, NULL, 5, NULL, 0
+#define GRP2S_one NULL, NULL, USE_GROUPS, NULL, 6, NULL, 0
+#define GRP2b_cl NULL, NULL, USE_GROUPS, NULL, 7, NULL, 0
+#define GRP2S_cl NULL, NULL, USE_GROUPS, NULL, 8, NULL, 0
+#define GRP3b NULL, NULL, USE_GROUPS, NULL, 9, NULL, 0
+#define GRP3S NULL, NULL, USE_GROUPS, NULL, 10, NULL, 0
+#define GRP4 NULL, NULL, USE_GROUPS, NULL, 11, NULL, 0
+#define GRP5 NULL, NULL, USE_GROUPS, NULL, 12, NULL, 0
+#define GRP6 NULL, NULL, USE_GROUPS, NULL, 13, NULL, 0
+#define GRP7 NULL, NULL, USE_GROUPS, NULL, 14, NULL, 0
+#define GRP8 NULL, NULL, USE_GROUPS, NULL, 15, NULL, 0
+#define GRP9 NULL, NULL, USE_GROUPS, NULL, 16, NULL, 0
+#define GRP10 NULL, NULL, USE_GROUPS, NULL, 17, NULL, 0
+#define GRP11 NULL, NULL, USE_GROUPS, NULL, 18, NULL, 0
+#define GRP12 NULL, NULL, USE_GROUPS, NULL, 19, NULL, 0
+#define GRP13 NULL, NULL, USE_GROUPS, NULL, 20, NULL, 0
+#define GRP14 NULL, NULL, USE_GROUPS, NULL, 21, NULL, 0
+#define GRPAMD NULL, NULL, USE_GROUPS, NULL, 22, NULL, 0
+#define GRPPADLCK1 NULL, NULL, USE_GROUPS, NULL, 23, NULL, 0
+#define GRPPADLCK2 NULL, NULL, USE_GROUPS, NULL, 24, NULL, 0
+
+#define PREGRP0 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 0, NULL, 0
+#define PREGRP1 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 1, NULL, 0
+#define PREGRP2 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 2, NULL, 0
+#define PREGRP3 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 3, NULL, 0
+#define PREGRP4 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 4, NULL, 0
+#define PREGRP5 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 5, NULL, 0
+#define PREGRP6 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 6, NULL, 0
+#define PREGRP7 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 7, NULL, 0
+#define PREGRP8 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 8, NULL, 0
+#define PREGRP9 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 9, NULL, 0
+#define PREGRP10 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 10, NULL, 0
+#define PREGRP11 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 11, NULL, 0
+#define PREGRP12 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 12, NULL, 0
+#define PREGRP13 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 13, NULL, 0
+#define PREGRP14 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 14, NULL, 0
+#define PREGRP15 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 15, NULL, 0
+#define PREGRP16 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 16, NULL, 0
+#define PREGRP17 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 17, NULL, 0
+#define PREGRP18 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 18, NULL, 0
+#define PREGRP19 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 19, NULL, 0
+#define PREGRP20 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 20, NULL, 0
+#define PREGRP21 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 21, NULL, 0
+#define PREGRP22 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 22, NULL, 0
+#define PREGRP23 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 23, NULL, 0
+#define PREGRP24 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 24, NULL, 0
+#define PREGRP25 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 25, NULL, 0
+#define PREGRP26 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 26, NULL, 0
+#define PREGRP27 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 27, NULL, 0
+#define PREGRP28 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 28, NULL, 0
+#define PREGRP29 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 29, NULL, 0
+#define PREGRP30 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 30, NULL, 0
+#define PREGRP31 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 31, NULL, 0
+#define PREGRP32 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 32, NULL, 0
+
+#define X86_64_0 NULL, NULL, X86_64_SPECIAL, NULL, 0, NULL, 0
+
+typedef void (*op_rtn) (int bytemode, int sizeflag);
+
+struct dis386 {
+ const char *name;
+ op_rtn op1;
+ int bytemode1;
+ op_rtn op2;
+ int bytemode2;
+ op_rtn op3;
+ int bytemode3;
+};
+
+/* Upper case letters in the instruction names here are macros.
+ 'A' => print 'b' if no register operands or suffix_always is true
+ 'B' => print 'b' if suffix_always is true
+ 'C' => print 's' or 'l' ('w' or 'd' in Intel mode) depending on operand
+ . size prefix
+ 'E' => print 'e' if 32-bit form of jcxz
+ 'F' => print 'w' or 'l' depending on address size prefix (loop insns)
+ 'H' => print ",pt" or ",pn" branch hint
+ 'I' => honor following macro letter even in Intel mode (implemented only
+ . for some of the macro letters)
+ 'J' => print 'l'
+ 'L' => print 'l' if suffix_always is true
+ 'N' => print 'n' if instruction has no wait "prefix"
+ 'O' => print 'd' or 'o'
+ 'P' => print 'w', 'l' or 'q' if instruction has an operand size prefix,
+ . or suffix_always is true. print 'q' if rex prefix is present.
+ 'Q' => print 'w', 'l' or 'q' if no register operands or suffix_always
+ . is true
+ 'R' => print 'w', 'l' or 'q' ("wd" or "dq" in intel mode)
+ 'S' => print 'w', 'l' or 'q' if suffix_always is true
+ 'T' => print 'q' in 64bit mode and behave as 'P' otherwise
+ 'U' => print 'q' in 64bit mode and behave as 'Q' otherwise
+ 'W' => print 'b' or 'w' ("w" or "de" in intel mode)
+ 'X' => print 's', 'd' depending on data16 prefix (for XMM)
+ 'Y' => print 'q' if the instruction has a REX 64-bit override prefix
+
+ Many of the above letters print nothing in Intel mode. See "putop"
+ for the details.
+
+ Braces '{' and '}', and vertical bars '|', indicate alternative
+ mnemonic strings for AT&T, Intel, X86_64 AT&T, and X86_64 Intel
+ modes. In cases where there are only two alternatives, the X86_64
+ instruction is reserved, and "(bad)" is printed.
+*/
+
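+/*
+ * Reading the table, an informative note: the 0x01 entry below,
+ * { "addS", Ev, Gv, XX }, is "add" with the 'S' suffix macro, a mod/rm
+ * operand whose size follows the operand-size prefixes (Ev) and the
+ * register from the reg field (Gv); XX marks an unused operand slot.
+ */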
+static const struct dis386 dis386[] = {
+ /* 00 */
+ { "addB", Eb, Gb, XX },
+ { "addS", Ev, Gv, XX },
+ { "addB", Gb, Eb, XX },
+ { "addS", Gv, Ev, XX },
+ { "addB", AL, Ib, XX },
+ { "addS", eAX, Iv, XX },
+ { "push{T|}", es, XX, XX },
+ { "pop{T|}", es, XX, XX },
+ /* 08 */
+ { "orB", Eb, Gb, XX },
+ { "orS", Ev, Gv, XX },
+ { "orB", Gb, Eb, XX },
+ { "orS", Gv, Ev, XX },
+ { "orB", AL, Ib, XX },
+ { "orS", eAX, Iv, XX },
+ { "push{T|}", cs, XX, XX },
+ { "(bad)", XX, XX, XX }, /* 0x0f extended opcode escape */
+ /* 10 */
+ { "adcB", Eb, Gb, XX },
+ { "adcS", Ev, Gv, XX },
+ { "adcB", Gb, Eb, XX },
+ { "adcS", Gv, Ev, XX },
+ { "adcB", AL, Ib, XX },
+ { "adcS", eAX, Iv, XX },
+ { "push{T|}", ss, XX, XX },
+ { "pop{T|}", ss, XX, XX },
+ /* 18 */
+ { "sbbB", Eb, Gb, XX },
+ { "sbbS", Ev, Gv, XX },
+ { "sbbB", Gb, Eb, XX },
+ { "sbbS", Gv, Ev, XX },
+ { "sbbB", AL, Ib, XX },
+ { "sbbS", eAX, Iv, XX },
+ { "push{T|}", ds, XX, XX },
+ { "pop{T|}", ds, XX, XX },
+ /* 20 */
+ { "andB", Eb, Gb, XX },
+ { "andS", Ev, Gv, XX },
+ { "andB", Gb, Eb, XX },
+ { "andS", Gv, Ev, XX },
+ { "andB", AL, Ib, XX },
+ { "andS", eAX, Iv, XX },
+ { "(bad)", XX, XX, XX }, /* SEG ES prefix */
+ { "daa{|}", XX, XX, XX },
+ /* 28 */
+ { "subB", Eb, Gb, XX },
+ { "subS", Ev, Gv, XX },
+ { "subB", Gb, Eb, XX },
+ { "subS", Gv, Ev, XX },
+ { "subB", AL, Ib, XX },
+ { "subS", eAX, Iv, XX },
+ { "(bad)", XX, XX, XX }, /* SEG CS prefix */
+ { "das{|}", XX, XX, XX },
+ /* 30 */
+ { "xorB", Eb, Gb, XX },
+ { "xorS", Ev, Gv, XX },
+ { "xorB", Gb, Eb, XX },
+ { "xorS", Gv, Ev, XX },
+ { "xorB", AL, Ib, XX },
+ { "xorS", eAX, Iv, XX },
+ { "(bad)", XX, XX, XX }, /* SEG SS prefix */
+ { "aaa{|}", XX, XX, XX },
+ /* 38 */
+ { "cmpB", Eb, Gb, XX },
+ { "cmpS", Ev, Gv, XX },
+ { "cmpB", Gb, Eb, XX },
+ { "cmpS", Gv, Ev, XX },
+ { "cmpB", AL, Ib, XX },
+ { "cmpS", eAX, Iv, XX },
+ { "(bad)", XX, XX, XX }, /* SEG DS prefix */
+ { "aas{|}", XX, XX, XX },
+ /* 40 */
+ { "inc{S|}", RMeAX, XX, XX },
+ { "inc{S|}", RMeCX, XX, XX },
+ { "inc{S|}", RMeDX, XX, XX },
+ { "inc{S|}", RMeBX, XX, XX },
+ { "inc{S|}", RMeSP, XX, XX },
+ { "inc{S|}", RMeBP, XX, XX },
+ { "inc{S|}", RMeSI, XX, XX },
+ { "inc{S|}", RMeDI, XX, XX },
+ /* 48 */
+ { "dec{S|}", RMeAX, XX, XX },
+ { "dec{S|}", RMeCX, XX, XX },
+ { "dec{S|}", RMeDX, XX, XX },
+ { "dec{S|}", RMeBX, XX, XX },
+ { "dec{S|}", RMeSP, XX, XX },
+ { "dec{S|}", RMeBP, XX, XX },
+ { "dec{S|}", RMeSI, XX, XX },
+ { "dec{S|}", RMeDI, XX, XX },
+ /* 50 */
+ { "pushS", RMrAX, XX, XX },
+ { "pushS", RMrCX, XX, XX },
+ { "pushS", RMrDX, XX, XX },
+ { "pushS", RMrBX, XX, XX },
+ { "pushS", RMrSP, XX, XX },
+ { "pushS", RMrBP, XX, XX },
+ { "pushS", RMrSI, XX, XX },
+ { "pushS", RMrDI, XX, XX },
+ /* 58 */
+ { "popS", RMrAX, XX, XX },
+ { "popS", RMrCX, XX, XX },
+ { "popS", RMrDX, XX, XX },
+ { "popS", RMrBX, XX, XX },
+ { "popS", RMrSP, XX, XX },
+ { "popS", RMrBP, XX, XX },
+ { "popS", RMrSI, XX, XX },
+ { "popS", RMrDI, XX, XX },
+ /* 60 */
+ { "pusha{P|}", XX, XX, XX },
+ { "popa{P|}", XX, XX, XX },
+ { "bound{S|}", Gv, Ma, XX },
+ { X86_64_0 },
+ { "(bad)", XX, XX, XX }, /* seg fs */
+ { "(bad)", XX, XX, XX }, /* seg gs */
+ { "(bad)", XX, XX, XX }, /* op size prefix */
+ { "(bad)", XX, XX, XX }, /* adr size prefix */
+ /* 68 */
+ { "pushT", Iq, XX, XX },
+ { "imulS", Gv, Ev, Iv },
+ { "pushT", sIb, XX, XX },
+ { "imulS", Gv, Ev, sIb },
+ { "ins{b||b|}", Yb, indirDX, XX },
+ { "ins{R||R|}", Yv, indirDX, XX },
+ { "outs{b||b|}", indirDX, Xb, XX },
+ { "outs{R||R|}", indirDX, Xv, XX },
+ /* 70 */
+ { "joH", Jb, XX, cond_jump_flag },
+ { "jnoH", Jb, XX, cond_jump_flag },
+ { "jbH", Jb, XX, cond_jump_flag },
+ { "jaeH", Jb, XX, cond_jump_flag },
+ { "jeH", Jb, XX, cond_jump_flag },
+ { "jneH", Jb, XX, cond_jump_flag },
+ { "jbeH", Jb, XX, cond_jump_flag },
+ { "jaH", Jb, XX, cond_jump_flag },
+ /* 78 */
+ { "jsH", Jb, XX, cond_jump_flag },
+ { "jnsH", Jb, XX, cond_jump_flag },
+ { "jpH", Jb, XX, cond_jump_flag },
+ { "jnpH", Jb, XX, cond_jump_flag },
+ { "jlH", Jb, XX, cond_jump_flag },
+ { "jgeH", Jb, XX, cond_jump_flag },
+ { "jleH", Jb, XX, cond_jump_flag },
+ { "jgH", Jb, XX, cond_jump_flag },
+ /* 80 */
+ { GRP1b },
+ { GRP1S },
+ { "(bad)", XX, XX, XX },
+ { GRP1Ss },
+ { "testB", Eb, Gb, XX },
+ { "testS", Ev, Gv, XX },
+ { "xchgB", Eb, Gb, XX },
+ { "xchgS", Ev, Gv, XX },
+ /* 88 */
+ { "movB", Eb, Gb, XX },
+ { "movS", Ev, Gv, XX },
+ { "movB", Gb, Eb, XX },
+ { "movS", Gv, Ev, XX },
+ { "movQ", Sv, Sw, XX },
+ { "leaS", Gv, M, XX },
+ { "movQ", Sw, Sv, XX },
+ { "popU", Ev, XX, XX },
+ /* 90 */
+ { "nop", NOP_Fixup, 0, XX, XX },
+ { "xchgS", RMeCX, eAX, XX },
+ { "xchgS", RMeDX, eAX, XX },
+ { "xchgS", RMeBX, eAX, XX },
+ { "xchgS", RMeSP, eAX, XX },
+ { "xchgS", RMeBP, eAX, XX },
+ { "xchgS", RMeSI, eAX, XX },
+ { "xchgS", RMeDI, eAX, XX },
+ /* 98 */
+ { "cW{tR||tR|}", XX, XX, XX },
+ { "cR{tO||tO|}", XX, XX, XX },
+ { "Jcall{T|}", Ap, XX, XX },
+ { "(bad)", XX, XX, XX }, /* fwait */
+ { "pushfT", XX, XX, XX },
+ { "popfT", XX, XX, XX },
+ { "sahf{|}", XX, XX, XX },
+ { "lahf{|}", XX, XX, XX },
+ /* a0 */
+ { "movB", AL, Ob64, XX },
+ { "movS", eAX, Ov64, XX },
+ { "movB", Ob64, AL, XX },
+ { "movS", Ov64, eAX, XX },
+ { "movs{b||b|}", Yb, Xb, XX },
+ { "movs{R||R|}", Yv, Xv, XX },
+ { "cmps{b||b|}", Xb, Yb, XX },
+ { "cmps{R||R|}", Xv, Yv, XX },
+ /* a8 */
+ { "testB", AL, Ib, XX },
+ { "testS", eAX, Iv, XX },
+ { "stosB", Yb, AL, XX },
+ { "stosS", Yv, eAX, XX },
+ { "lodsB", AL, Xb, XX },
+ { "lodsS", eAX, Xv, XX },
+ { "scasB", AL, Yb, XX },
+ { "scasS", eAX, Yv, XX },
+ /* b0 */
+ { "movB", RMAL, Ib, XX },
+ { "movB", RMCL, Ib, XX },
+ { "movB", RMDL, Ib, XX },
+ { "movB", RMBL, Ib, XX },
+ { "movB", RMAH, Ib, XX },
+ { "movB", RMCH, Ib, XX },
+ { "movB", RMDH, Ib, XX },
+ { "movB", RMBH, Ib, XX },
+ /* b8 */
+ { "movS", RMeAX, Iv64, XX },
+ { "movS", RMeCX, Iv64, XX },
+ { "movS", RMeDX, Iv64, XX },
+ { "movS", RMeBX, Iv64, XX },
+ { "movS", RMeSP, Iv64, XX },
+ { "movS", RMeBP, Iv64, XX },
+ { "movS", RMeSI, Iv64, XX },
+ { "movS", RMeDI, Iv64, XX },
+ /* c0 */
+ { GRP2b },
+ { GRP2S },
+ { "retT", Iw, XX, XX },
+ { "retT", XX, XX, XX },
+ { "les{S|}", Gv, Mp, XX },
+ { "ldsS", Gv, Mp, XX },
+ { "movA", Eb, Ib, XX },
+ { "movQ", Ev, Iv, XX },
+ /* c8 */
+ { "enterT", Iw, Ib, XX },
+ { "leaveT", XX, XX, XX },
+ { "lretP", Iw, XX, XX },
+ { "lretP", XX, XX, XX },
+ { "int3", XX, XX, XX },
+ { "int", Ib, XX, XX },
+ { "into{|}", XX, XX, XX },
+ { "iretP", XX, XX, XX },
+ /* d0 */
+ { GRP2b_one },
+ { GRP2S_one },
+ { GRP2b_cl },
+ { GRP2S_cl },
+ { "aam{|}", sIb, XX, XX },
+ { "aad{|}", sIb, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "xlat", DSBX, XX, XX },
+ /* d8 */
+ { FLOAT },
+ { FLOAT },
+ { FLOAT },
+ { FLOAT },
+ { FLOAT },
+ { FLOAT },
+ { FLOAT },
+ { FLOAT },
+ /* e0 */
+ { "loopneFH", Jb, XX, loop_jcxz_flag },
+ { "loopeFH", Jb, XX, loop_jcxz_flag },
+ { "loopFH", Jb, XX, loop_jcxz_flag },
+ { "jEcxzH", Jb, XX, loop_jcxz_flag },
+ { "inB", AL, Ib, XX },
+ { "inS", eAX, Ib, XX },
+ { "outB", Ib, AL, XX },
+ { "outS", Ib, eAX, XX },
+ /* e8 */
+ { "callT", Jv, XX, XX },
+ { "jmpT", Jv, XX, XX },
+ { "Jjmp{T|}", Ap, XX, XX },
+ { "jmp", Jb, XX, XX },
+ { "inB", AL, indirDX, XX },
+ { "inS", eAX, indirDX, XX },
+ { "outB", indirDX, AL, XX },
+ { "outS", indirDX, eAX, XX },
+ /* f0 */
+ { "(bad)", XX, XX, XX }, /* lock prefix */
+ { "icebp", XX, XX, XX },
+ { "(bad)", XX, XX, XX }, /* repne */
+ { "(bad)", XX, XX, XX }, /* repz */
+ { "hlt", XX, XX, XX },
+ { "cmc", XX, XX, XX },
+ { GRP3b },
+ { GRP3S },
+ /* f8 */
+ { "clc", XX, XX, XX },
+ { "stc", XX, XX, XX },
+ { "cli", XX, XX, XX },
+ { "sti", XX, XX, XX },
+ { "cld", XX, XX, XX },
+ { "std", XX, XX, XX },
+ { GRP4 },
+ { GRP5 },
+};
+
+static const struct dis386 dis386_twobyte[] = {
+ /* 00 */
+ { GRP6 },
+ { GRP7 },
+ { "larS", Gv, Ew, XX },
+ { "lslS", Gv, Ew, XX },
+ { "(bad)", XX, XX, XX },
+ { "syscall", XX, XX, XX },
+ { "clts", XX, XX, XX },
+ { "sysretP", XX, XX, XX },
+ /* 08 */
+ { "invd", XX, XX, XX },
+ { "wbinvd", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "ud2a", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { GRPAMD },
+ { "femms", XX, XX, XX },
+ { "", MX, EM, OPSUF }, /* See OP_3DNowSuffix. */
+ /* 10 */
+ { PREGRP8 },
+ { PREGRP9 },
+ { PREGRP30 },
+ { "movlpX", EX, XM, SIMD_Fixup, 'h' },
+ { "unpcklpX", XM, EX, XX },
+ { "unpckhpX", XM, EX, XX },
+ { PREGRP31 },
+ { "movhpX", EX, XM, SIMD_Fixup, 'l' },
+ /* 18 */
+ { GRP14 },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ /* 20 */
+ { "movL", Rm, Cm, XX },
+ { "movL", Rm, Dm, XX },
+ { "movL", Cm, Rm, XX },
+ { "movL", Dm, Rm, XX },
+ { "movL", Rd, Td, XX },
+ { "(bad)", XX, XX, XX },
+ { "movL", Td, Rd, XX },
+ { "(bad)", XX, XX, XX },
+ /* 28 */
+ { "movapX", XM, EX, XX },
+ { "movapX", EX, XM, XX },
+ { PREGRP2 },
+ { "movntpX", Ev, XM, XX },
+ { PREGRP4 },
+ { PREGRP3 },
+ { "ucomisX", XM,EX, XX },
+ { "comisX", XM,EX, XX },
+ /* 30 */
+ { "wrmsr", XX, XX, XX },
+ { "rdtsc", XX, XX, XX },
+ { "rdmsr", XX, XX, XX },
+ { "rdpmc", XX, XX, XX },
+ { "sysenter", XX, XX, XX },
+ { "sysexit", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ /* 38 */
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ /* 40 */
+ { "cmovo", Gv, Ev, XX },
+ { "cmovno", Gv, Ev, XX },
+ { "cmovb", Gv, Ev, XX },
+ { "cmovae", Gv, Ev, XX },
+ { "cmove", Gv, Ev, XX },
+ { "cmovne", Gv, Ev, XX },
+ { "cmovbe", Gv, Ev, XX },
+ { "cmova", Gv, Ev, XX },
+ /* 48 */
+ { "cmovs", Gv, Ev, XX },
+ { "cmovns", Gv, Ev, XX },
+ { "cmovp", Gv, Ev, XX },
+ { "cmovnp", Gv, Ev, XX },
+ { "cmovl", Gv, Ev, XX },
+ { "cmovge", Gv, Ev, XX },
+ { "cmovle", Gv, Ev, XX },
+ { "cmovg", Gv, Ev, XX },
+ /* 50 */
+ { "movmskpX", Gdq, XS, XX },
+ { PREGRP13 },
+ { PREGRP12 },
+ { PREGRP11 },
+ { "andpX", XM, EX, XX },
+ { "andnpX", XM, EX, XX },
+ { "orpX", XM, EX, XX },
+ { "xorpX", XM, EX, XX },
+ /* 58 */
+ { PREGRP0 },
+ { PREGRP10 },
+ { PREGRP17 },
+ { PREGRP16 },
+ { PREGRP14 },
+ { PREGRP7 },
+ { PREGRP5 },
+ { PREGRP6 },
+ /* 60 */
+ { "punpcklbw", MX, EM, XX },
+ { "punpcklwd", MX, EM, XX },
+ { "punpckldq", MX, EM, XX },
+ { "packsswb", MX, EM, XX },
+ { "pcmpgtb", MX, EM, XX },
+ { "pcmpgtw", MX, EM, XX },
+ { "pcmpgtd", MX, EM, XX },
+ { "packuswb", MX, EM, XX },
+ /* 68 */
+ { "punpckhbw", MX, EM, XX },
+ { "punpckhwd", MX, EM, XX },
+ { "punpckhdq", MX, EM, XX },
+ { "packssdw", MX, EM, XX },
+ { PREGRP26 },
+ { PREGRP24 },
+ { "movd", MX, Edq, XX },
+ { PREGRP19 },
+ /* 70 */
+ { PREGRP22 },
+ { GRP10 },
+ { GRP11 },
+ { GRP12 },
+ { "pcmpeqb", MX, EM, XX },
+ { "pcmpeqw", MX, EM, XX },
+ { "pcmpeqd", MX, EM, XX },
+ { "emms", XX, XX, XX },
+ /* 78 */
+ { "vmread", Em, Gm, XX },
+ { "vmwrite", Gm, Em, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { PREGRP28 },
+ { PREGRP29 },
+ { PREGRP23 },
+ { PREGRP20 },
+ /* 80 */
+ { "joH", Jv, XX, cond_jump_flag },
+ { "jnoH", Jv, XX, cond_jump_flag },
+ { "jbH", Jv, XX, cond_jump_flag },
+ { "jaeH", Jv, XX, cond_jump_flag },
+ { "jeH", Jv, XX, cond_jump_flag },
+ { "jneH", Jv, XX, cond_jump_flag },
+ { "jbeH", Jv, XX, cond_jump_flag },
+ { "jaH", Jv, XX, cond_jump_flag },
+ /* 88 */
+ { "jsH", Jv, XX, cond_jump_flag },
+ { "jnsH", Jv, XX, cond_jump_flag },
+ { "jpH", Jv, XX, cond_jump_flag },
+ { "jnpH", Jv, XX, cond_jump_flag },
+ { "jlH", Jv, XX, cond_jump_flag },
+ { "jgeH", Jv, XX, cond_jump_flag },
+ { "jleH", Jv, XX, cond_jump_flag },
+ { "jgH", Jv, XX, cond_jump_flag },
+ /* 90 */
+ { "seto", Eb, XX, XX },
+ { "setno", Eb, XX, XX },
+ { "setb", Eb, XX, XX },
+ { "setae", Eb, XX, XX },
+ { "sete", Eb, XX, XX },
+ { "setne", Eb, XX, XX },
+ { "setbe", Eb, XX, XX },
+ { "seta", Eb, XX, XX },
+ /* 98 */
+ { "sets", Eb, XX, XX },
+ { "setns", Eb, XX, XX },
+ { "setp", Eb, XX, XX },
+ { "setnp", Eb, XX, XX },
+ { "setl", Eb, XX, XX },
+ { "setge", Eb, XX, XX },
+ { "setle", Eb, XX, XX },
+ { "setg", Eb, XX, XX },
+ /* a0 */
+ { "pushT", fs, XX, XX },
+ { "popT", fs, XX, XX },
+ { "cpuid", XX, XX, XX },
+ { "btS", Ev, Gv, XX },
+ { "shldS", Ev, Gv, Ib },
+ { "shldS", Ev, Gv, CL },
+ { GRPPADLCK2 },
+ { GRPPADLCK1 },
+ /* a8 */
+ { "pushT", gs, XX, XX },
+ { "popT", gs, XX, XX },
+ { "rsm", XX, XX, XX },
+ { "btsS", Ev, Gv, XX },
+ { "shrdS", Ev, Gv, Ib },
+ { "shrdS", Ev, Gv, CL },
+ { GRP13 },
+ { "imulS", Gv, Ev, XX },
+ /* b0 */
+ { "cmpxchgB", Eb, Gb, XX },
+ { "cmpxchgS", Ev, Gv, XX },
+ { "lssS", Gv, Mp, XX },
+ { "btrS", Ev, Gv, XX },
+ { "lfsS", Gv, Mp, XX },
+ { "lgsS", Gv, Mp, XX },
+ { "movz{bR|x|bR|x}", Gv, Eb, XX },
+ { "movz{wR|x|wR|x}", Gv, Ew, XX }, /* yes, there really is movzww ! */
+ /* b8 */
+ { "(bad)", XX, XX, XX },
+ { "ud2b", XX, XX, XX },
+ { GRP8 },
+ { "btcS", Ev, Gv, XX },
+ { "bsfS", Gv, Ev, XX },
+ { "bsrS", Gv, Ev, XX },
+ { "movs{bR|x|bR|x}", Gv, Eb, XX },
+ { "movs{wR|x|wR|x}", Gv, Ew, XX }, /* yes, there really is movsww ! */
+ /* c0 */
+ { "xaddB", Eb, Gb, XX },
+ { "xaddS", Ev, Gv, XX },
+ { PREGRP1 },
+ { "movntiS", Ev, Gv, XX },
+ { "pinsrw", MX, Edqw, Ib },
+ { "pextrw", Gdq, MS, Ib },
+ { "shufpX", XM, EX, Ib },
+ { GRP9 },
+ /* c8 */
+ { "bswap", RMeAX, XX, XX },
+ { "bswap", RMeCX, XX, XX },
+ { "bswap", RMeDX, XX, XX },
+ { "bswap", RMeBX, XX, XX },
+ { "bswap", RMeSP, XX, XX },
+ { "bswap", RMeBP, XX, XX },
+ { "bswap", RMeSI, XX, XX },
+ { "bswap", RMeDI, XX, XX },
+ /* d0 */
+ { PREGRP27 },
+ { "psrlw", MX, EM, XX },
+ { "psrld", MX, EM, XX },
+ { "psrlq", MX, EM, XX },
+ { "paddq", MX, EM, XX },
+ { "pmullw", MX, EM, XX },
+ { PREGRP21 },
+ { "pmovmskb", Gdq, MS, XX },
+ /* d8 */
+ { "psubusb", MX, EM, XX },
+ { "psubusw", MX, EM, XX },
+ { "pminub", MX, EM, XX },
+ { "pand", MX, EM, XX },
+ { "paddusb", MX, EM, XX },
+ { "paddusw", MX, EM, XX },
+ { "pmaxub", MX, EM, XX },
+ { "pandn", MX, EM, XX },
+ /* e0 */
+ { "pavgb", MX, EM, XX },
+ { "psraw", MX, EM, XX },
+ { "psrad", MX, EM, XX },
+ { "pavgw", MX, EM, XX },
+ { "pmulhuw", MX, EM, XX },
+ { "pmulhw", MX, EM, XX },
+ { PREGRP15 },
+ { PREGRP25 },
+ /* e8 */
+ { "psubsb", MX, EM, XX },
+ { "psubsw", MX, EM, XX },
+ { "pminsw", MX, EM, XX },
+ { "por", MX, EM, XX },
+ { "paddsb", MX, EM, XX },
+ { "paddsw", MX, EM, XX },
+ { "pmaxsw", MX, EM, XX },
+ { "pxor", MX, EM, XX },
+ /* f0 */
+ { PREGRP32 },
+ { "psllw", MX, EM, XX },
+ { "pslld", MX, EM, XX },
+ { "psllq", MX, EM, XX },
+ { "pmuludq", MX, EM, XX },
+ { "pmaddwd", MX, EM, XX },
+ { "psadbw", MX, EM, XX },
+ { PREGRP18 },
+ /* f8 */
+ { "psubb", MX, EM, XX },
+ { "psubw", MX, EM, XX },
+ { "psubd", MX, EM, XX },
+ { "psubq", MX, EM, XX },
+ { "paddb", MX, EM, XX },
+ { "paddw", MX, EM, XX },
+ { "paddd", MX, EM, XX },
+ { "(bad)", XX, XX, XX }
+};
+
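+/* Nonzero entries mark one-byte opcodes that are followed by a ModR/M
+   byte; print_insn consults this table (and twobyte_has_modrm below
+   for 0x0f-prefixed opcodes) to set need_modrm before decoding.  */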
+static const unsigned char onebyte_has_modrm[256] = {
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ /* ------------------------------- */
+ /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
+ /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
+ /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
+ /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
+ /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
+ /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
+ /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
+ /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
+ /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
+ /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
+ /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
+ /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
+ /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
+ /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
+ /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
+ /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
+ /* ------------------------------- */
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+};
+
+static const unsigned char twobyte_has_modrm[256] = {
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ /* ------------------------------- */
+ /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
+ /* 10 */ 1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0, /* 1f */
+ /* 20 */ 1,1,1,1,1,0,1,0,1,1,1,1,1,1,1,1, /* 2f */
+ /* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */
+ /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
+ /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
+ /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
+ /* 70 */ 1,1,1,1,1,1,1,0,1,1,0,0,1,1,1,1, /* 7f */
+ /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
+ /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
+ /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
+ /* b0 */ 1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1, /* bf */
+ /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
+ /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
+ /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
+ /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
+ /* ------------------------------- */
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+};
+
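+/* Nonzero entries mark two-byte opcodes whose repz (0xf3), operand
+   size (0x66) and repnz (0xf2) prefixes select an SSE variant through
+   prefix_user_table rather than being printed as ordinary prefixes.  */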
+static const unsigned char twobyte_uses_SSE_prefix[256] = {
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ /* ------------------------------- */
+ /* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */
+ /* 10 */ 1,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0, /* 1f */
+ /* 20 */ 0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0, /* 2f */
+ /* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */
+ /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */
+ /* 50 */ 0,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* 5f */
+ /* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1, /* 6f */
+ /* 70 */ 1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, /* 7f */
+ /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
+ /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */
+ /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */
+ /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */
+ /* c0 */ 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */
+ /* d0 */ 1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* df */
+ /* e0 */ 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* ef */
+ /* f0 */ 1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0 /* ff */
+ /* ------------------------------- */
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+};
+
+static char obuf[100];
+static char *obufp;
+static char scratchbuf[100];
+static unsigned char *start_codep;
+static unsigned char *insn_codep;
+static unsigned char *codep;
+static disassemble_info *the_info;
+static int mod;
+static int rm;
+static int reg;
+static unsigned char need_modrm;
+
+/* If we are accessing mod/rm/reg without need_modrm set, then the
+ values are stale. Hitting this abort likely indicates that you
+ need to update onebyte_has_modrm or twobyte_has_modrm. */
+#define MODRM_CHECK if (!need_modrm) abort ()
+
+static const char **names64;
+static const char **names32;
+static const char **names16;
+static const char **names8;
+static const char **names8rex;
+static const char **names_seg;
+static const char **index16;
+
+static const char *intel_names64[] = {
+ "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+};
+static const char *intel_names32[] = {
+ "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
+ "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d"
+};
+static const char *intel_names16[] = {
+ "ax", "cx", "dx", "bx", "sp", "bp", "si", "di",
+ "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
+};
+static const char *intel_names8[] = {
+ "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh",
+};
+static const char *intel_names8rex[] = {
+ "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil",
+ "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b"
+};
+static const char *intel_names_seg[] = {
+ "es", "cs", "ss", "ds", "fs", "gs", "?", "?",
+};
+static const char *intel_index16[] = {
+ "bx+si", "bx+di", "bp+si", "bp+di", "si", "di", "bp", "bx"
+};
+
+static const char *att_names64[] = {
+ "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
+ "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
+};
+static const char *att_names32[] = {
+ "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
+ "%r8d", "%r9d", "%r10d", "%r11d", "%r12d", "%r13d", "%r14d", "%r15d"
+};
+static const char *att_names16[] = {
+ "%ax", "%cx", "%dx", "%bx", "%sp", "%bp", "%si", "%di",
+ "%r8w", "%r9w", "%r10w", "%r11w", "%r12w", "%r13w", "%r14w", "%r15w"
+};
+static const char *att_names8[] = {
+ "%al", "%cl", "%dl", "%bl", "%ah", "%ch", "%dh", "%bh",
+};
+static const char *att_names8rex[] = {
+ "%al", "%cl", "%dl", "%bl", "%spl", "%bpl", "%sil", "%dil",
+ "%r8b", "%r9b", "%r10b", "%r11b", "%r12b", "%r13b", "%r14b", "%r15b"
+};
+static const char *att_names_seg[] = {
+ "%es", "%cs", "%ss", "%ds", "%fs", "%gs", "%?", "%?",
+};
+static const char *att_index16[] = {
+ "%bx,%si", "%bx,%di", "%bp,%si", "%bp,%di", "%si", "%di", "%bp", "%bx"
+};
+
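+/* ModR/M reg-field opcode extensions: print_insn reaches a row here
+   via USE_GROUPS, using bytemode2 as the group number and the reg
+   field of the ModR/M byte as the index within the row.  */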
+static const struct dis386 grps[][8] = {
+ /* GRP1b */
+ {
+ { "addA", Eb, Ib, XX },
+ { "orA", Eb, Ib, XX },
+ { "adcA", Eb, Ib, XX },
+ { "sbbA", Eb, Ib, XX },
+ { "andA", Eb, Ib, XX },
+ { "subA", Eb, Ib, XX },
+ { "xorA", Eb, Ib, XX },
+ { "cmpA", Eb, Ib, XX }
+ },
+ /* GRP1S */
+ {
+ { "addQ", Ev, Iv, XX },
+ { "orQ", Ev, Iv, XX },
+ { "adcQ", Ev, Iv, XX },
+ { "sbbQ", Ev, Iv, XX },
+ { "andQ", Ev, Iv, XX },
+ { "subQ", Ev, Iv, XX },
+ { "xorQ", Ev, Iv, XX },
+ { "cmpQ", Ev, Iv, XX }
+ },
+ /* GRP1Ss */
+ {
+ { "addQ", Ev, sIb, XX },
+ { "orQ", Ev, sIb, XX },
+ { "adcQ", Ev, sIb, XX },
+ { "sbbQ", Ev, sIb, XX },
+ { "andQ", Ev, sIb, XX },
+ { "subQ", Ev, sIb, XX },
+ { "xorQ", Ev, sIb, XX },
+ { "cmpQ", Ev, sIb, XX }
+ },
+ /* GRP2b */
+ {
+ { "rolA", Eb, Ib, XX },
+ { "rorA", Eb, Ib, XX },
+ { "rclA", Eb, Ib, XX },
+ { "rcrA", Eb, Ib, XX },
+ { "shlA", Eb, Ib, XX },
+ { "shrA", Eb, Ib, XX },
+ { "(bad)", XX, XX, XX },
+ { "sarA", Eb, Ib, XX },
+ },
+ /* GRP2S */
+ {
+ { "rolQ", Ev, Ib, XX },
+ { "rorQ", Ev, Ib, XX },
+ { "rclQ", Ev, Ib, XX },
+ { "rcrQ", Ev, Ib, XX },
+ { "shlQ", Ev, Ib, XX },
+ { "shrQ", Ev, Ib, XX },
+ { "(bad)", XX, XX, XX },
+ { "sarQ", Ev, Ib, XX },
+ },
+ /* GRP2b_one */
+ {
+ { "rolA", Eb, I1, XX },
+ { "rorA", Eb, I1, XX },
+ { "rclA", Eb, I1, XX },
+ { "rcrA", Eb, I1, XX },
+ { "shlA", Eb, I1, XX },
+ { "shrA", Eb, I1, XX },
+ { "(bad)", XX, XX, XX },
+ { "sarA", Eb, I1, XX },
+ },
+ /* GRP2S_one */
+ {
+ { "rolQ", Ev, I1, XX },
+ { "rorQ", Ev, I1, XX },
+ { "rclQ", Ev, I1, XX },
+ { "rcrQ", Ev, I1, XX },
+ { "shlQ", Ev, I1, XX },
+ { "shrQ", Ev, I1, XX },
+ { "(bad)", XX, XX, XX},
+ { "sarQ", Ev, I1, XX },
+ },
+ /* GRP2b_cl */
+ {
+ { "rolA", Eb, CL, XX },
+ { "rorA", Eb, CL, XX },
+ { "rclA", Eb, CL, XX },
+ { "rcrA", Eb, CL, XX },
+ { "shlA", Eb, CL, XX },
+ { "shrA", Eb, CL, XX },
+ { "(bad)", XX, XX, XX },
+ { "sarA", Eb, CL, XX },
+ },
+ /* GRP2S_cl */
+ {
+ { "rolQ", Ev, CL, XX },
+ { "rorQ", Ev, CL, XX },
+ { "rclQ", Ev, CL, XX },
+ { "rcrQ", Ev, CL, XX },
+ { "shlQ", Ev, CL, XX },
+ { "shrQ", Ev, CL, XX },
+ { "(bad)", XX, XX, XX },
+ { "sarQ", Ev, CL, XX }
+ },
+ /* GRP3b */
+ {
+ { "testA", Eb, Ib, XX },
+ { "(bad)", Eb, XX, XX },
+ { "notA", Eb, XX, XX },
+ { "negA", Eb, XX, XX },
+ { "mulA", Eb, XX, XX }, /* Don't print the implicit %al register, */
+ { "imulA", Eb, XX, XX }, /* to distinguish these opcodes from other */
+ { "divA", Eb, XX, XX }, /* mul/imul opcodes. Do the same for div */
+ { "idivA", Eb, XX, XX } /* and idiv for consistency. */
+ },
+ /* GRP3S */
+ {
+ { "testQ", Ev, Iv, XX },
+ { "(bad)", XX, XX, XX },
+ { "notQ", Ev, XX, XX },
+ { "negQ", Ev, XX, XX },
+ { "mulQ", Ev, XX, XX }, /* Don't print the implicit register. */
+ { "imulQ", Ev, XX, XX },
+ { "divQ", Ev, XX, XX },
+ { "idivQ", Ev, XX, XX },
+ },
+ /* GRP4 */
+ {
+ { "incA", Eb, XX, XX },
+ { "decA", Eb, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ },
+ /* GRP5 */
+ {
+ { "incQ", Ev, XX, XX },
+ { "decQ", Ev, XX, XX },
+ { "callT", indirEv, XX, XX },
+ { "JcallT", indirEp, XX, XX },
+ { "jmpT", indirEv, XX, XX },
+ { "JjmpT", indirEp, XX, XX },
+ { "pushU", Ev, XX, XX },
+ { "(bad)", XX, XX, XX },
+ },
+ /* GRP6 */
+ {
+ { "sldtQ", Ev, XX, XX },
+ { "strQ", Ev, XX, XX },
+ { "lldt", Ew, XX, XX },
+ { "ltr", Ew, XX, XX },
+ { "verr", Ew, XX, XX },
+ { "verw", Ew, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX }
+ },
+ /* GRP7 */
+ {
+ { "sgdtIQ", VMX_Fixup, 0, XX, XX },
+ { "sidtIQ", PNI_Fixup, 0, XX, XX },
+ { "lgdt{Q|Q||}", M, XX, XX },
+ { "lidt{Q|Q||}", SVME_Fixup, 0, XX, XX },
+ { "smswQ", Ev, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "lmsw", Ew, XX, XX },
+ { "invlpg", INVLPG_Fixup, w_mode, XX, XX },
+ },
+ /* GRP8 */
+ {
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "btQ", Ev, Ib, XX },
+ { "btsQ", Ev, Ib, XX },
+ { "btrQ", Ev, Ib, XX },
+ { "btcQ", Ev, Ib, XX },
+ },
+ /* GRP9 */
+ {
+ { "(bad)", XX, XX, XX },
+ { "cmpxchg8b", Eq, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "", VM, XX, XX }, /* See OP_VMX. */
+ { "vmptrst", Eq, XX, XX },
+ },
+ /* GRP10 */
+ {
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "psrlw", MS, Ib, XX },
+ { "(bad)", XX, XX, XX },
+ { "psraw", MS, Ib, XX },
+ { "(bad)", XX, XX, XX },
+ { "psllw", MS, Ib, XX },
+ { "(bad)", XX, XX, XX },
+ },
+ /* GRP11 */
+ {
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "psrld", MS, Ib, XX },
+ { "(bad)", XX, XX, XX },
+ { "psrad", MS, Ib, XX },
+ { "(bad)", XX, XX, XX },
+ { "pslld", MS, Ib, XX },
+ { "(bad)", XX, XX, XX },
+ },
+ /* GRP12 */
+ {
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "psrlq", MS, Ib, XX },
+ { "psrldq", MS, Ib, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "psllq", MS, Ib, XX },
+ { "pslldq", MS, Ib, XX },
+ },
+ /* GRP13 */
+ {
+ { "fxsave", Ev, XX, XX },
+ { "fxrstor", Ev, XX, XX },
+ { "ldmxcsr", Ev, XX, XX },
+ { "stmxcsr", Ev, XX, XX },
+ { "xsave", Ev, XX, XX },
+ { "xrstor", OP_0fae, 0, XX, XX },
+ { "mfence", OP_0fae, 0, XX, XX },
+ { "clflush", OP_0fae, 0, XX, XX },
+ },
+ /* GRP14 */
+ {
+ { "prefetchnta", Ev, XX, XX },
+ { "prefetcht0", Ev, XX, XX },
+ { "prefetcht1", Ev, XX, XX },
+ { "prefetcht2", Ev, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ },
+ /* GRPAMD */
+ {
+ { "prefetch", Eb, XX, XX },
+ { "prefetchw", Eb, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ },
+ /* GRPPADLCK1 */
+ {
+ { "xstore-rng", OP_0f07, 0, XX, XX },
+ { "xcrypt-ecb", OP_0f07, 0, XX, XX },
+ { "xcrypt-cbc", OP_0f07, 0, XX, XX },
+ { "xcrypt-ctr", OP_0f07, 0, XX, XX },
+ { "xcrypt-cfb", OP_0f07, 0, XX, XX },
+ { "xcrypt-ofb", OP_0f07, 0, XX, XX },
+ { "(bad)", OP_0f07, 0, XX, XX },
+ { "(bad)", OP_0f07, 0, XX, XX },
+ },
+ /* GRPPADLCK2 */
+ {
+ { "montmul", OP_0f07, 0, XX, XX },
+ { "xsha1", OP_0f07, 0, XX, XX },
+ { "xsha256", OP_0f07, 0, XX, XX },
+ { "(bad)", OP_0f07, 0, XX, XX },
+ { "(bad)", OP_0f07, 0, XX, XX },
+ { "(bad)", OP_0f07, 0, XX, XX },
+ { "(bad)", OP_0f07, 0, XX, XX },
+ { "(bad)", OP_0f07, 0, XX, XX },
+ }
+};
+
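+/* Rows are indexed 0-3 by the SSE prefix in effect: none, repz (0xf3),
+   operand size (0x66), repnz (0xf2); see the USE_PREFIX_USER_TABLE
+   case in print_insn.  */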
+static const struct dis386 prefix_user_table[][4] = {
+ /* PREGRP0 */
+ {
+ { "addps", XM, EX, XX },
+ { "addss", XM, EX, XX },
+ { "addpd", XM, EX, XX },
+ { "addsd", XM, EX, XX },
+ },
+ /* PREGRP1 */
+ {
+ { "", XM, EX, OPSIMD }, /* See OP_SIMD_SUFFIX. */
+ { "", XM, EX, OPSIMD },
+ { "", XM, EX, OPSIMD },
+ { "", XM, EX, OPSIMD },
+ },
+ /* PREGRP2 */
+ {
+ { "cvtpi2ps", XM, EM, XX },
+ { "cvtsi2ssY", XM, Ev, XX },
+ { "cvtpi2pd", XM, EM, XX },
+ { "cvtsi2sdY", XM, Ev, XX },
+ },
+ /* PREGRP3 */
+ {
+ { "cvtps2pi", MX, EX, XX },
+ { "cvtss2siY", Gv, EX, XX },
+ { "cvtpd2pi", MX, EX, XX },
+ { "cvtsd2siY", Gv, EX, XX },
+ },
+ /* PREGRP4 */
+ {
+ { "cvttps2pi", MX, EX, XX },
+ { "cvttss2siY", Gv, EX, XX },
+ { "cvttpd2pi", MX, EX, XX },
+ { "cvttsd2siY", Gv, EX, XX },
+ },
+ /* PREGRP5 */
+ {
+ { "divps", XM, EX, XX },
+ { "divss", XM, EX, XX },
+ { "divpd", XM, EX, XX },
+ { "divsd", XM, EX, XX },
+ },
+ /* PREGRP6 */
+ {
+ { "maxps", XM, EX, XX },
+ { "maxss", XM, EX, XX },
+ { "maxpd", XM, EX, XX },
+ { "maxsd", XM, EX, XX },
+ },
+ /* PREGRP7 */
+ {
+ { "minps", XM, EX, XX },
+ { "minss", XM, EX, XX },
+ { "minpd", XM, EX, XX },
+ { "minsd", XM, EX, XX },
+ },
+ /* PREGRP8 */
+ {
+ { "movups", XM, EX, XX },
+ { "movss", XM, EX, XX },
+ { "movupd", XM, EX, XX },
+ { "movsd", XM, EX, XX },
+ },
+ /* PREGRP9 */
+ {
+ { "movups", EX, XM, XX },
+ { "movss", EX, XM, XX },
+ { "movupd", EX, XM, XX },
+ { "movsd", EX, XM, XX },
+ },
+ /* PREGRP10 */
+ {
+ { "mulps", XM, EX, XX },
+ { "mulss", XM, EX, XX },
+ { "mulpd", XM, EX, XX },
+ { "mulsd", XM, EX, XX },
+ },
+ /* PREGRP11 */
+ {
+ { "rcpps", XM, EX, XX },
+ { "rcpss", XM, EX, XX },
+ { "(bad)", XM, EX, XX },
+ { "(bad)", XM, EX, XX },
+ },
+ /* PREGRP12 */
+ {
+ { "rsqrtps", XM, EX, XX },
+ { "rsqrtss", XM, EX, XX },
+ { "(bad)", XM, EX, XX },
+ { "(bad)", XM, EX, XX },
+ },
+ /* PREGRP13 */
+ {
+ { "sqrtps", XM, EX, XX },
+ { "sqrtss", XM, EX, XX },
+ { "sqrtpd", XM, EX, XX },
+ { "sqrtsd", XM, EX, XX },
+ },
+ /* PREGRP14 */
+ {
+ { "subps", XM, EX, XX },
+ { "subss", XM, EX, XX },
+ { "subpd", XM, EX, XX },
+ { "subsd", XM, EX, XX },
+ },
+ /* PREGRP15 */
+ {
+ { "(bad)", XM, EX, XX },
+ { "cvtdq2pd", XM, EX, XX },
+ { "cvttpd2dq", XM, EX, XX },
+ { "cvtpd2dq", XM, EX, XX },
+ },
+ /* PREGRP16 */
+ {
+ { "cvtdq2ps", XM, EX, XX },
+ { "cvttps2dq",XM, EX, XX },
+ { "cvtps2dq",XM, EX, XX },
+ { "(bad)", XM, EX, XX },
+ },
+ /* PREGRP17 */
+ {
+ { "cvtps2pd", XM, EX, XX },
+ { "cvtss2sd", XM, EX, XX },
+ { "cvtpd2ps", XM, EX, XX },
+ { "cvtsd2ss", XM, EX, XX },
+ },
+ /* PREGRP18 */
+ {
+ { "maskmovq", MX, MS, XX },
+ { "(bad)", XM, EX, XX },
+ { "maskmovdqu", XM, EX, XX },
+ { "(bad)", XM, EX, XX },
+ },
+ /* PREGRP19 */
+ {
+ { "movq", MX, EM, XX },
+ { "movdqu", XM, EX, XX },
+ { "movdqa", XM, EX, XX },
+ { "(bad)", XM, EX, XX },
+ },
+ /* PREGRP20 */
+ {
+ { "movq", EM, MX, XX },
+ { "movdqu", EX, XM, XX },
+ { "movdqa", EX, XM, XX },
+ { "(bad)", EX, XM, XX },
+ },
+ /* PREGRP21 */
+ {
+ { "(bad)", EX, XM, XX },
+ { "movq2dq", XM, MS, XX },
+ { "movq", EX, XM, XX },
+ { "movdq2q", MX, XS, XX },
+ },
+ /* PREGRP22 */
+ {
+ { "pshufw", MX, EM, Ib },
+ { "pshufhw", XM, EX, Ib },
+ { "pshufd", XM, EX, Ib },
+ { "pshuflw", XM, EX, Ib },
+ },
+ /* PREGRP23 */
+ {
+ { "movd", Edq, MX, XX },
+ { "movq", XM, EX, XX },
+ { "movd", Edq, XM, XX },
+ { "(bad)", Ed, XM, XX },
+ },
+ /* PREGRP24 */
+ {
+ { "(bad)", MX, EX, XX },
+ { "(bad)", XM, EX, XX },
+ { "punpckhqdq", XM, EX, XX },
+ { "(bad)", XM, EX, XX },
+ },
+ /* PREGRP25 */
+ {
+ { "movntq", EM, MX, XX },
+ { "(bad)", EM, XM, XX },
+ { "movntdq", EM, XM, XX },
+ { "(bad)", EM, XM, XX },
+ },
+ /* PREGRP26 */
+ {
+ { "(bad)", MX, EX, XX },
+ { "(bad)", XM, EX, XX },
+ { "punpcklqdq", XM, EX, XX },
+ { "(bad)", XM, EX, XX },
+ },
+ /* PREGRP27 */
+ {
+ { "(bad)", MX, EX, XX },
+ { "(bad)", XM, EX, XX },
+ { "addsubpd", XM, EX, XX },
+ { "addsubps", XM, EX, XX },
+ },
+ /* PREGRP28 */
+ {
+ { "(bad)", MX, EX, XX },
+ { "(bad)", XM, EX, XX },
+ { "haddpd", XM, EX, XX },
+ { "haddps", XM, EX, XX },
+ },
+ /* PREGRP29 */
+ {
+ { "(bad)", MX, EX, XX },
+ { "(bad)", XM, EX, XX },
+ { "hsubpd", XM, EX, XX },
+ { "hsubps", XM, EX, XX },
+ },
+ /* PREGRP30 */
+ {
+ { "movlpX", XM, EX, SIMD_Fixup, 'h' }, /* really only 2 operands */
+ { "movsldup", XM, EX, XX },
+ { "movlpd", XM, EX, XX },
+ { "movddup", XM, EX, XX },
+ },
+ /* PREGRP31 */
+ {
+ { "movhpX", XM, EX, SIMD_Fixup, 'l' },
+ { "movshdup", XM, EX, XX },
+ { "movhpd", XM, EX, XX },
+ { "(bad)", XM, EX, XX },
+ },
+ /* PREGRP32 */
+ {
+ { "(bad)", XM, EX, XX },
+ { "(bad)", XM, EX, XX },
+ { "(bad)", XM, EX, XX },
+ { "lddqu", XM, M, XX },
+ },
+};
+
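+/* Opcodes that decode differently in 64-bit mode; column 0 is the
+   32-bit form and column 1 the 64-bit form, indexed by mode_64bit.  */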
+static const struct dis386 x86_64_table[][2] = {
+ {
+ { "arpl", Ew, Gw, XX },
+ { "movs{||lq|xd}", Gv, Ed, XX },
+ },
+};
+
+#ifdef __KERNEL__
+#define INTERNAL_DISASSEMBLER_ERROR "<internal disassembler error>"
+#else /* __KERNEL__ */
+#define INTERNAL_DISASSEMBLER_ERROR _("<internal disassembler error>")
+#endif /* __KERNEL__ */
+
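+/* Consume the prefix bytes preceding the opcode (segment, operand and
+   address size, repz/repnz, lock, fwait and, in 64-bit mode, REX),
+   accumulating them in 'prefixes' and 'rex'.  A REX prefix followed by
+   another prefix is printed and discarded.  Stops at the first
+   non-prefix byte.  */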
+static void
+ckprefix (void)
+{
+ int newrex;
+ rex = 0;
+ prefixes = 0;
+ used_prefixes = 0;
+ rex_used = 0;
+ while (1)
+ {
+ FETCH_DATA (the_info, codep + 1);
+ newrex = 0;
+ switch (*codep)
+ {
+ /* REX prefixes family. */
+ case 0x40:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x44:
+ case 0x45:
+ case 0x46:
+ case 0x47:
+ case 0x48:
+ case 0x49:
+ case 0x4a:
+ case 0x4b:
+ case 0x4c:
+ case 0x4d:
+ case 0x4e:
+ case 0x4f:
+ if (mode_64bit)
+ newrex = *codep;
+ else
+ return;
+ break;
+ case 0xf3:
+ prefixes |= PREFIX_REPZ;
+ break;
+ case 0xf2:
+ prefixes |= PREFIX_REPNZ;
+ break;
+ case 0xf0:
+ prefixes |= PREFIX_LOCK;
+ break;
+ case 0x2e:
+ prefixes |= PREFIX_CS;
+ break;
+ case 0x36:
+ prefixes |= PREFIX_SS;
+ break;
+ case 0x3e:
+ prefixes |= PREFIX_DS;
+ break;
+ case 0x26:
+ prefixes |= PREFIX_ES;
+ break;
+ case 0x64:
+ prefixes |= PREFIX_FS;
+ break;
+ case 0x65:
+ prefixes |= PREFIX_GS;
+ break;
+ case 0x66:
+ prefixes |= PREFIX_DATA;
+ break;
+ case 0x67:
+ prefixes |= PREFIX_ADDR;
+ break;
+ case FWAIT_OPCODE:
+ /* fwait is really an instruction. If there are prefixes
+ before the fwait, they belong to the fwait, *not* to the
+ following instruction. */
+ if (prefixes)
+ {
+ prefixes |= PREFIX_FWAIT;
+ codep++;
+ return;
+ }
+ prefixes = PREFIX_FWAIT;
+ break;
+ default:
+ return;
+ }
+ /* Rex is ignored when followed by another prefix. */
+ if (rex)
+ {
+ oappend (prefix_name (rex, 0));
+ oappend (" ");
+ }
+ rex = newrex;
+ codep++;
+ }
+}
+
+/* Return the name of the prefix byte PREF, or NULL if PREF is not a
+ prefix byte. */
+
+static const char *
+prefix_name (int pref, int sizeflag)
+{
+ switch (pref)
+ {
+ /* REX prefixes family. */
+ case 0x40:
+ return "rex";
+ case 0x41:
+ return "rexZ";
+ case 0x42:
+ return "rexY";
+ case 0x43:
+ return "rexYZ";
+ case 0x44:
+ return "rexX";
+ case 0x45:
+ return "rexXZ";
+ case 0x46:
+ return "rexXY";
+ case 0x47:
+ return "rexXYZ";
+ case 0x48:
+ return "rex64";
+ case 0x49:
+ return "rex64Z";
+ case 0x4a:
+ return "rex64Y";
+ case 0x4b:
+ return "rex64YZ";
+ case 0x4c:
+ return "rex64X";
+ case 0x4d:
+ return "rex64XZ";
+ case 0x4e:
+ return "rex64XY";
+ case 0x4f:
+ return "rex64XYZ";
+ case 0xf3:
+ return "repz";
+ case 0xf2:
+ return "repnz";
+ case 0xf0:
+ return "lock";
+ case 0x2e:
+ return "cs";
+ case 0x36:
+ return "ss";
+ case 0x3e:
+ return "ds";
+ case 0x26:
+ return "es";
+ case 0x64:
+ return "fs";
+ case 0x65:
+ return "gs";
+ case 0x66:
+ return (sizeflag & DFLAG) ? "data16" : "data32";
+ case 0x67:
+ if (mode_64bit)
+ return (sizeflag & AFLAG) ? "addr32" : "addr64";
+ else
+ return (sizeflag & AFLAG) ? "addr16" : "addr32";
+ case FWAIT_OPCODE:
+ return "fwait";
+ default:
+ return NULL;
+ }
+}
+
+static char op1out[100], op2out[100], op3out[100];
+static int op_ad, op_index[3];
+static int two_source_ops;
+static bfd_vma op_address[3];
+static bfd_vma op_riprel[3];
+static bfd_vma start_pc;
+
+/*
+ * On the 386s of 1988, the maximum length of an instruction is 15 bytes.
+ * (See the topic "Redundant prefixes" in the "Differences from 8086"
+ * section of the "Virtual 8086 Mode" chapter.)
+ * 'pc' should be the address of this instruction; it will
+ * be used to print the target address if this is a relative jump or call.
+ * The function returns the length of this instruction in bytes.
+ */
+
+static char intel_syntax;
+static char open_char;
+static char close_char;
+static char separator_char;
+static char scale_char;
+
+/* Here for backwards compatibility. When gdb stops using
+ print_insn_i386_att and print_insn_i386_intel these functions can
+ disappear, and print_insn_i386 be merged into print_insn. */
+int
+print_insn_i386_att (bfd_vma pc, disassemble_info *info)
+{
+ intel_syntax = 0;
+
+ return print_insn (pc, info);
+}
+
+int
+print_insn_i386_intel (bfd_vma pc, disassemble_info *info)
+{
+ intel_syntax = 1;
+
+ return print_insn (pc, info);
+}
+
+int
+print_insn_i386 (bfd_vma pc, disassemble_info *info)
+{
+ intel_syntax = -1;
+
+ return print_insn (pc, info);
+}
+
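+/* Disassemble the instruction at PC into info->stream: gather the
+   prefixes, select a handler from the one- or two-byte opcode table
+   (resolving group, SSE-prefix and x86-64 indirections), format up to
+   three operands, and print separately any prefix that was never
+   consumed.  Returns the number of bytes decoded.  */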
+static int
+print_insn (bfd_vma pc, disassemble_info *info)
+{
+ const struct dis386 *dp;
+ int i;
+ char *first, *second, *third;
+ int needcomma;
+ unsigned char uses_SSE_prefix, uses_LOCK_prefix;
+ int sizeflag;
+ const char *p;
+ struct dis_private priv;
+
+ mode_64bit = (info->mach == bfd_mach_x86_64_intel_syntax
+ || info->mach == bfd_mach_x86_64);
+
+ if (intel_syntax == (char) -1)
+ intel_syntax = (info->mach == bfd_mach_i386_i386_intel_syntax
+ || info->mach == bfd_mach_x86_64_intel_syntax);
+
+ if (info->mach == bfd_mach_i386_i386
+ || info->mach == bfd_mach_x86_64
+ || info->mach == bfd_mach_i386_i386_intel_syntax
+ || info->mach == bfd_mach_x86_64_intel_syntax)
+ priv.orig_sizeflag = AFLAG | DFLAG;
+ else if (info->mach == bfd_mach_i386_i8086)
+ priv.orig_sizeflag = 0;
+ else
+ abort ();
+
+ for (p = info->disassembler_options; p != NULL; )
+ {
+ if (strncmp (p, "x86-64", 6) == 0)
+ {
+ mode_64bit = 1;
+ priv.orig_sizeflag = AFLAG | DFLAG;
+ }
+ else if (strncmp (p, "i386", 4) == 0)
+ {
+ mode_64bit = 0;
+ priv.orig_sizeflag = AFLAG | DFLAG;
+ }
+ else if (strncmp (p, "i8086", 5) == 0)
+ {
+ mode_64bit = 0;
+ priv.orig_sizeflag = 0;
+ }
+ else if (strncmp (p, "intel", 5) == 0)
+ {
+ intel_syntax = 1;
+ }
+ else if (strncmp (p, "att", 3) == 0)
+ {
+ intel_syntax = 0;
+ }
+ else if (strncmp (p, "addr", 4) == 0)
+ {
+ if (p[4] == '1' && p[5] == '6')
+ priv.orig_sizeflag &= ~AFLAG;
+ else if (p[4] == '3' && p[5] == '2')
+ priv.orig_sizeflag |= AFLAG;
+ }
+ else if (strncmp (p, "data", 4) == 0)
+ {
+ if (p[4] == '1' && p[5] == '6')
+ priv.orig_sizeflag &= ~DFLAG;
+ else if (p[4] == '3' && p[5] == '2')
+ priv.orig_sizeflag |= DFLAG;
+ }
+ else if (strncmp (p, "suffix", 6) == 0)
+ priv.orig_sizeflag |= SUFFIX_ALWAYS;
+
+ p = strchr (p, ',');
+ if (p != NULL)
+ p++;
+ }
+
+ if (intel_syntax)
+ {
+ names64 = intel_names64;
+ names32 = intel_names32;
+ names16 = intel_names16;
+ names8 = intel_names8;
+ names8rex = intel_names8rex;
+ names_seg = intel_names_seg;
+ index16 = intel_index16;
+ open_char = '[';
+ close_char = ']';
+ separator_char = '+';
+ scale_char = '*';
+ }
+ else
+ {
+ names64 = att_names64;
+ names32 = att_names32;
+ names16 = att_names16;
+ names8 = att_names8;
+ names8rex = att_names8rex;
+ names_seg = att_names_seg;
+ index16 = att_index16;
+ open_char = '(';
+ close_char = ')';
+ separator_char = ',';
+ scale_char = ',';
+ }
+
+ /* The output looks better if we put 7 bytes on a line, since that
+ puts most long word instructions on a single line. */
+ info->bytes_per_line = 7;
+
+ info->private_data = &priv;
+ priv.max_fetched = priv.the_buffer;
+ priv.insn_start = pc;
+
+ obuf[0] = 0;
+ op1out[0] = 0;
+ op2out[0] = 0;
+ op3out[0] = 0;
+
+ op_index[0] = op_index[1] = op_index[2] = -1;
+
+ the_info = info;
+ start_pc = pc;
+ start_codep = priv.the_buffer;
+ codep = priv.the_buffer;
+
+#ifndef __KERNEL__
+ if (setjmp (priv.bailout) != 0)
+ {
+ const char *name;
+
+ /* Getting here means we tried for data but didn't get it. That
+ means we have an incomplete instruction of some sort. Just
+ print the first byte as a prefix or a .byte pseudo-op. */
+ if (codep > priv.the_buffer)
+ {
+ name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag);
+ if (name != NULL)
+ (*info->fprintf_func) (info->stream, "%s", name);
+ else
+ {
+ /* Just print the first byte as a .byte instruction. */
+ (*info->fprintf_func) (info->stream, ".byte 0x%x",
+ (unsigned int) priv.the_buffer[0]);
+ }
+
+ return 1;
+ }
+
+ return -1;
+ }
+#endif /* !__KERNEL__ */
+
+ obufp = obuf;
+ ckprefix ();
+
+ insn_codep = codep;
+ sizeflag = priv.orig_sizeflag;
+
+ FETCH_DATA (info, codep + 1);
+ two_source_ops = (*codep == 0x62) || (*codep == 0xc8);
+
+ if ((prefixes & PREFIX_FWAIT)
+ && ((*codep < 0xd8) || (*codep > 0xdf)))
+ {
+ const char *name;
+
+ /* fwait not followed by floating point instruction. Print the
+ first prefix, which is probably fwait itself. */
+ name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag);
+ if (name == NULL)
+ name = INTERNAL_DISASSEMBLER_ERROR;
+ (*info->fprintf_func) (info->stream, "%s", name);
+ return 1;
+ }
+
+ if (*codep == 0x0f)
+ {
+ FETCH_DATA (info, codep + 2);
+ dp = &dis386_twobyte[*++codep];
+ need_modrm = twobyte_has_modrm[*codep];
+ uses_SSE_prefix = twobyte_uses_SSE_prefix[*codep];
+ uses_LOCK_prefix = (*codep & ~0x02) == 0x20;
+ }
+ else
+ {
+ dp = &dis386[*codep];
+ need_modrm = onebyte_has_modrm[*codep];
+ uses_SSE_prefix = 0;
+ uses_LOCK_prefix = 0;
+ }
+ codep++;
+
+ if (!uses_SSE_prefix && (prefixes & PREFIX_REPZ))
+ {
+ oappend ("repz ");
+ used_prefixes |= PREFIX_REPZ;
+ }
+ if (!uses_SSE_prefix && (prefixes & PREFIX_REPNZ))
+ {
+ oappend ("repnz ");
+ used_prefixes |= PREFIX_REPNZ;
+ }
+ if (!uses_LOCK_prefix && (prefixes & PREFIX_LOCK))
+ {
+ oappend ("lock ");
+ used_prefixes |= PREFIX_LOCK;
+ }
+
+ if (prefixes & PREFIX_ADDR)
+ {
+ sizeflag ^= AFLAG;
+ if (dp->bytemode3 != loop_jcxz_mode || intel_syntax)
+ {
+ if ((sizeflag & AFLAG) || mode_64bit)
+ oappend ("addr32 ");
+ else
+ oappend ("addr16 ");
+ used_prefixes |= PREFIX_ADDR;
+ }
+ }
+
+ if (!uses_SSE_prefix && (prefixes & PREFIX_DATA))
+ {
+ sizeflag ^= DFLAG;
+ if (dp->bytemode3 == cond_jump_mode
+ && dp->bytemode1 == v_mode
+ && !intel_syntax)
+ {
+ if (sizeflag & DFLAG)
+ oappend ("data32 ");
+ else
+ oappend ("data16 ");
+ used_prefixes |= PREFIX_DATA;
+ }
+ }
+
+ if (need_modrm)
+ {
+ FETCH_DATA (info, codep + 1);
+ mod = (*codep >> 6) & 3;
+ reg = (*codep >> 3) & 7;
+ rm = *codep & 7;
+ }
+
+ if (dp->name == NULL && dp->bytemode1 == FLOATCODE)
+ {
+ dofloat (sizeflag);
+ }
+ else
+ {
+ int index;
+ if (dp->name == NULL)
+ {
+ switch (dp->bytemode1)
+ {
+ case USE_GROUPS:
+ dp = &grps[dp->bytemode2][reg];
+ break;
+
+ case USE_PREFIX_USER_TABLE:
+ index = 0;
+ used_prefixes |= (prefixes & PREFIX_REPZ);
+ if (prefixes & PREFIX_REPZ)
+ index = 1;
+ else
+ {
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ if (prefixes & PREFIX_DATA)
+ index = 2;
+ else
+ {
+ used_prefixes |= (prefixes & PREFIX_REPNZ);
+ if (prefixes & PREFIX_REPNZ)
+ index = 3;
+ }
+ }
+ dp = &prefix_user_table[dp->bytemode2][index];
+ break;
+
+ case X86_64_SPECIAL:
+ dp = &x86_64_table[dp->bytemode2][mode_64bit];
+ break;
+
+ default:
+ oappend (INTERNAL_DISASSEMBLER_ERROR);
+ break;
+ }
+ }
+
+ if (putop (dp->name, sizeflag) == 0)
+ {
+ obufp = op1out;
+ op_ad = 2;
+ if (dp->op1)
+ (*dp->op1) (dp->bytemode1, sizeflag);
+
+ obufp = op2out;
+ op_ad = 1;
+ if (dp->op2)
+ (*dp->op2) (dp->bytemode2, sizeflag);
+
+ obufp = op3out;
+ op_ad = 0;
+ if (dp->op3)
+ (*dp->op3) (dp->bytemode3, sizeflag);
+ }
+ }
+
+ /* See if any prefixes were not used. If so, print the first one
+ separately. If we don't do this, we'll wind up printing an
+ instruction stream which does not precisely correspond to the
+ bytes we are disassembling. */
+ if ((prefixes & ~used_prefixes) != 0)
+ {
+ const char *name;
+
+ name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag);
+ if (name == NULL)
+ name = INTERNAL_DISASSEMBLER_ERROR;
+ (*info->fprintf_func) (info->stream, "%s", name);
+ return 1;
+ }
+ if (rex & ~rex_used)
+ {
+ const char *name;
+ name = prefix_name (rex | 0x40, priv.orig_sizeflag);
+ if (name == NULL)
+ name = INTERNAL_DISASSEMBLER_ERROR;
+ (*info->fprintf_func) (info->stream, "%s ", name);
+ }
+
+ obufp = obuf + strlen (obuf);
+ for (i = strlen (obuf); i < 6; i++)
+ oappend (" ");
+ oappend (" ");
+ (*info->fprintf_func) (info->stream, "%s", obuf);
+
+ /* The enter and bound instructions are printed with operands in the same
+ order as the intel book; everything else is printed in reverse order. */
+ if (intel_syntax || two_source_ops)
+ {
+ first = op1out;
+ second = op2out;
+ third = op3out;
+ op_ad = op_index[0];
+ op_index[0] = op_index[2];
+ op_index[2] = op_ad;
+ }
+ else
+ {
+ first = op3out;
+ second = op2out;
+ third = op1out;
+ }
+ needcomma = 0;
+ if (*first)
+ {
+ if (op_index[0] != -1 && !op_riprel[0])
+ (*info->print_address_func) ((bfd_vma) op_address[op_index[0]], info);
+ else
+ (*info->fprintf_func) (info->stream, "%s", first);
+ needcomma = 1;
+ }
+ if (*second)
+ {
+ if (needcomma)
+ (*info->fprintf_func) (info->stream, ",");
+ if (op_index[1] != -1 && !op_riprel[1])
+ (*info->print_address_func) ((bfd_vma) op_address[op_index[1]], info);
+ else
+ (*info->fprintf_func) (info->stream, "%s", second);
+ needcomma = 1;
+ }
+ if (*third)
+ {
+ if (needcomma)
+ (*info->fprintf_func) (info->stream, ",");
+ if (op_index[2] != -1 && !op_riprel[2])
+ (*info->print_address_func) ((bfd_vma) op_address[op_index[2]], info);
+ else
+ (*info->fprintf_func) (info->stream, "%s", third);
+ }
+ for (i = 0; i < 3; i++)
+ if (op_index[i] != -1 && op_riprel[i])
+ {
+ (*info->fprintf_func) (info->stream, " # ");
+ (*info->print_address_func) ((bfd_vma) (start_pc + codep - start_codep
+ + op_address[op_index[i]]), info);
+ }
+ return codep - priv.the_buffer;
+}
+
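+/* Mnemonics for x87 escape opcodes with a memory operand, indexed by
+   (opcode - 0xd8) * 8 + the ModR/M reg field; float_mem_mode below
+   supplies the matching operand size for each entry.  */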
+static const char *float_mem[] = {
+ /* d8 */
+ "fadd{s||s|}",
+ "fmul{s||s|}",
+ "fcom{s||s|}",
+ "fcomp{s||s|}",
+ "fsub{s||s|}",
+ "fsubr{s||s|}",
+ "fdiv{s||s|}",
+ "fdivr{s||s|}",
+ /* d9 */
+ "fld{s||s|}",
+ "(bad)",
+ "fst{s||s|}",
+ "fstp{s||s|}",
+ "fldenvIC",
+ "fldcw",
+ "fNstenvIC",
+ "fNstcw",
+ /* da */
+ "fiadd{l||l|}",
+ "fimul{l||l|}",
+ "ficom{l||l|}",
+ "ficomp{l||l|}",
+ "fisub{l||l|}",
+ "fisubr{l||l|}",
+ "fidiv{l||l|}",
+ "fidivr{l||l|}",
+ /* db */
+ "fild{l||l|}",
+ "fisttp{l||l|}",
+ "fist{l||l|}",
+ "fistp{l||l|}",
+ "(bad)",
+ "fld{t||t|}",
+ "(bad)",
+ "fstp{t||t|}",
+ /* dc */
+ "fadd{l||l|}",
+ "fmul{l||l|}",
+ "fcom{l||l|}",
+ "fcomp{l||l|}",
+ "fsub{l||l|}",
+ "fsubr{l||l|}",
+ "fdiv{l||l|}",
+ "fdivr{l||l|}",
+ /* dd */
+ "fld{l||l|}",
+ "fisttp{ll||ll|}",
+ "fst{l||l|}",
+ "fstp{l||l|}",
+ "frstorIC",
+ "(bad)",
+ "fNsaveIC",
+ "fNstsw",
+ /* de */
+ "fiadd",
+ "fimul",
+ "ficom",
+ "ficomp",
+ "fisub",
+ "fisubr",
+ "fidiv",
+ "fidivr",
+ /* df */
+ "fild",
+ "fisttp",
+ "fist",
+ "fistp",
+ "fbld",
+ "fild{ll||ll|}",
+ "fbstp",
+ "fistp{ll||ll|}",
+};
+
+static const unsigned char float_mem_mode[] = {
+ /* d8 */
+ d_mode,
+ d_mode,
+ d_mode,
+ d_mode,
+ d_mode,
+ d_mode,
+ d_mode,
+ d_mode,
+ /* d9 */
+ d_mode,
+ 0,
+ d_mode,
+ d_mode,
+ 0,
+ w_mode,
+ 0,
+ w_mode,
+ /* da */
+ d_mode,
+ d_mode,
+ d_mode,
+ d_mode,
+ d_mode,
+ d_mode,
+ d_mode,
+ d_mode,
+ /* db */
+ d_mode,
+ d_mode,
+ d_mode,
+ d_mode,
+ 0,
+ t_mode,
+ 0,
+ t_mode,
+ /* dc */
+ q_mode,
+ q_mode,
+ q_mode,
+ q_mode,
+ q_mode,
+ q_mode,
+ q_mode,
+ q_mode,
+ /* dd */
+ q_mode,
+ q_mode,
+ q_mode,
+ q_mode,
+ 0,
+ 0,
+ 0,
+ w_mode,
+ /* de */
+ w_mode,
+ w_mode,
+ w_mode,
+ w_mode,
+ w_mode,
+ w_mode,
+ w_mode,
+ w_mode,
+ /* df */
+ w_mode,
+ w_mode,
+ w_mode,
+ w_mode,
+ t_mode,
+ q_mode,
+ t_mode,
+ q_mode
+};
+
+#define ST OP_ST, 0
+#define STi OP_STi, 0
+
+#define FGRPd9_2 NULL, NULL, 0, NULL, 0, NULL, 0
+#define FGRPd9_4 NULL, NULL, 1, NULL, 0, NULL, 0
+#define FGRPd9_5 NULL, NULL, 2, NULL, 0, NULL, 0
+#define FGRPd9_6 NULL, NULL, 3, NULL, 0, NULL, 0
+#define FGRPd9_7 NULL, NULL, 4, NULL, 0, NULL, 0
+#define FGRPda_5 NULL, NULL, 5, NULL, 0, NULL, 0
+#define FGRPdb_4 NULL, NULL, 6, NULL, 0, NULL, 0
+#define FGRPde_3 NULL, NULL, 7, NULL, 0, NULL, 0
+#define FGRPdf_4 NULL, NULL, 8, NULL, 0, NULL, 0
+
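+/* x87 register-operand forms, indexed by [opcode - 0xd8][reg]; the
+   FGRP entries mark rows that are fully decoded through fgrps.  */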
+static const struct dis386 float_reg[][8] = {
+ /* d8 */
+ {
+ { "fadd", ST, STi, XX },
+ { "fmul", ST, STi, XX },
+ { "fcom", STi, XX, XX },
+ { "fcomp", STi, XX, XX },
+ { "fsub", ST, STi, XX },
+ { "fsubr", ST, STi, XX },
+ { "fdiv", ST, STi, XX },
+ { "fdivr", ST, STi, XX },
+ },
+ /* d9 */
+ {
+ { "fld", STi, XX, XX },
+ { "fxch", STi, XX, XX },
+ { FGRPd9_2 },
+ { "(bad)", XX, XX, XX },
+ { FGRPd9_4 },
+ { FGRPd9_5 },
+ { FGRPd9_6 },
+ { FGRPd9_7 },
+ },
+ /* da */
+ {
+ { "fcmovb", ST, STi, XX },
+ { "fcmove", ST, STi, XX },
+ { "fcmovbe",ST, STi, XX },
+ { "fcmovu", ST, STi, XX },
+ { "(bad)", XX, XX, XX },
+ { FGRPda_5 },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ },
+ /* db */
+ {
+ { "fcmovnb",ST, STi, XX },
+ { "fcmovne",ST, STi, XX },
+ { "fcmovnbe",ST, STi, XX },
+ { "fcmovnu",ST, STi, XX },
+ { FGRPdb_4 },
+ { "fucomi", ST, STi, XX },
+ { "fcomi", ST, STi, XX },
+ { "(bad)", XX, XX, XX },
+ },
+ /* dc */
+ {
+ { "fadd", STi, ST, XX },
+ { "fmul", STi, ST, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+#if UNIXWARE_COMPAT
+ { "fsub", STi, ST, XX },
+ { "fsubr", STi, ST, XX },
+ { "fdiv", STi, ST, XX },
+ { "fdivr", STi, ST, XX },
+#else
+ { "fsubr", STi, ST, XX },
+ { "fsub", STi, ST, XX },
+ { "fdivr", STi, ST, XX },
+ { "fdiv", STi, ST, XX },
+#endif
+ },
+ /* dd */
+ {
+ { "ffree", STi, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "fst", STi, XX, XX },
+ { "fstp", STi, XX, XX },
+ { "fucom", STi, XX, XX },
+ { "fucomp", STi, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ },
+ /* de */
+ {
+ { "faddp", STi, ST, XX },
+ { "fmulp", STi, ST, XX },
+ { "(bad)", XX, XX, XX },
+ { FGRPde_3 },
+#if UNIXWARE_COMPAT
+ { "fsubp", STi, ST, XX },
+ { "fsubrp", STi, ST, XX },
+ { "fdivp", STi, ST, XX },
+ { "fdivrp", STi, ST, XX },
+#else
+ { "fsubrp", STi, ST, XX },
+ { "fsubp", STi, ST, XX },
+ { "fdivrp", STi, ST, XX },
+ { "fdivp", STi, ST, XX },
+#endif
+ },
+ /* df */
+ {
+ { "ffreep", STi, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { "(bad)", XX, XX, XX },
+ { FGRPdf_4 },
+ { "fucomip",ST, STi, XX },
+ { "fcomip", ST, STi, XX },
+ { "(bad)", XX, XX, XX },
+ },
+};
+
+static char *fgrps[][8] = {
+ /* d9_2 0 */
+ {
+ "fnop","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)",
+ },
+
+ /* d9_4 1 */
+ {
+ "fchs","fabs","(bad)","(bad)","ftst","fxam","(bad)","(bad)",
+ },
+
+ /* d9_5 2 */
+ {
+ "fld1","fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz","(bad)",
+ },
+
+ /* d9_6 3 */
+ {
+ "f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp","fincstp",
+ },
+
+ /* d9_7 4 */
+ {
+ "fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos",
+ },
+
+ /* da_5 5 */
+ {
+ "(bad)","fucompp","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)",
+ },
+
+ /* db_4 6 */
+ {
+ "feni(287 only)","fdisi(287 only)","fNclex","fNinit",
+ "fNsetpm(287 only)","(bad)","(bad)","(bad)",
+ },
+
+ /* de_3 7 */
+ {
+ "(bad)","fcompp","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)",
+ },
+
+ /* df_4 8 */
+ {
+ "fNstsw","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)",
+ },
+};
+
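+/* Disassemble an x87 escape opcode (0xd8-0xdf): memory forms
+   (mod != 3) are looked up in float_mem/float_mem_mode, register forms
+   in float_reg, with fgrps supplying the special cases.  */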
+static void
+dofloat (int sizeflag)
+{
+ const struct dis386 *dp;
+ unsigned char floatop;
+
+ floatop = codep[-1];
+
+ if (mod != 3)
+ {
+ int fp_indx = (floatop - 0xd8) * 8 + reg;
+
+ putop (float_mem[fp_indx], sizeflag);
+ obufp = op1out;
+ OP_E (float_mem_mode[fp_indx], sizeflag);
+ return;
+ }
+ /* Skip mod/rm byte. */
+ MODRM_CHECK;
+ codep++;
+
+ dp = &float_reg[floatop - 0xd8][reg];
+ if (dp->name == NULL)
+ {
+ putop (fgrps[dp->bytemode1][rm], sizeflag);
+
+ /* The fnstsw instruction is the only one with a strange argument. */
+ if (floatop == 0xdf && codep[-1] == 0xe0)
+ strcpy (op1out, names16[0]);
+ }
+ else
+ {
+ putop (dp->name, sizeflag);
+
+ obufp = op1out;
+ if (dp->op1)
+ (*dp->op1) (dp->bytemode1, sizeflag);
+ obufp = op2out;
+ if (dp->op2)
+ (*dp->op2) (dp->bytemode2, sizeflag);
+ }
+}
+
+static void
+OP_ST (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
+{
+ oappend ("%st");
+}
+
+static void
+OP_STi (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
+{
+ sprintf (scratchbuf, "%%st(%d)", rm);
+ oappend (scratchbuf + intel_syntax);
+}
+
+/* Capital letters in template are macros. */
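+/* Each macro emits a size suffix or mnemonic variant chosen from the
+   active prefixes, REX bits and syntax mode; a "{...|...}" group picks
+   between the AT&T/Intel and 32/64-bit spellings.  */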
+static int
+putop (const char *template, int sizeflag)
+{
+ const char *p;
+ int alt = 0;
+
+ for (p = template; *p; p++)
+ {
+ switch (*p)
+ {
+ default:
+ *obufp++ = *p;
+ break;
+ case '{':
+ alt = 0;
+ if (intel_syntax)
+ alt += 1;
+ if (mode_64bit)
+ alt += 2;
+ while (alt != 0)
+ {
+ while (*++p != '|')
+ {
+ if (*p == '}')
+ {
+ /* Alternative not valid. */
+ strcpy (obuf, "(bad)");
+ obufp = obuf + 5;
+ return 1;
+ }
+ else if (*p == '\0')
+ abort ();
+ }
+ alt--;
+ }
+ /* Fall through. */
+ case 'I':
+ alt = 1;
+ continue;
+ case '|':
+ while (*++p != '}')
+ {
+ if (*p == '\0')
+ abort ();
+ }
+ break;
+ case '}':
+ break;
+ case 'A':
+ if (intel_syntax)
+ break;
+ if (mod != 3 || (sizeflag & SUFFIX_ALWAYS))
+ *obufp++ = 'b';
+ break;
+ case 'B':
+ if (intel_syntax)
+ break;
+ if (sizeflag & SUFFIX_ALWAYS)
+ *obufp++ = 'b';
+ break;
+ case 'C':
+ if (intel_syntax && !alt)
+ break;
+ if ((prefixes & PREFIX_DATA) || (sizeflag & SUFFIX_ALWAYS))
+ {
+ if (sizeflag & DFLAG)
+ *obufp++ = intel_syntax ? 'd' : 'l';
+ else
+ *obufp++ = intel_syntax ? 'w' : 's';
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ }
+ break;
+ case 'E': /* For jcxz/jecxz */
+ if (mode_64bit)
+ {
+ if (sizeflag & AFLAG)
+ *obufp++ = 'r';
+ else
+ *obufp++ = 'e';
+ }
+ else
+ if (sizeflag & AFLAG)
+ *obufp++ = 'e';
+ used_prefixes |= (prefixes & PREFIX_ADDR);
+ break;
+ case 'F':
+ if (intel_syntax)
+ break;
+ if ((prefixes & PREFIX_ADDR) || (sizeflag & SUFFIX_ALWAYS))
+ {
+ if (sizeflag & AFLAG)
+ *obufp++ = mode_64bit ? 'q' : 'l';
+ else
+ *obufp++ = mode_64bit ? 'l' : 'w';
+ used_prefixes |= (prefixes & PREFIX_ADDR);
+ }
+ break;
+ case 'H':
+ if (intel_syntax)
+ break;
+ if ((prefixes & (PREFIX_CS | PREFIX_DS)) == PREFIX_CS
+ || (prefixes & (PREFIX_CS | PREFIX_DS)) == PREFIX_DS)
+ {
+ used_prefixes |= prefixes & (PREFIX_CS | PREFIX_DS);
+ *obufp++ = ',';
+ *obufp++ = 'p';
+ if (prefixes & PREFIX_DS)
+ *obufp++ = 't';
+ else
+ *obufp++ = 'n';
+ }
+ break;
+ case 'J':
+ if (intel_syntax)
+ break;
+ *obufp++ = 'l';
+ break;
+ case 'L':
+ if (intel_syntax)
+ break;
+ if (sizeflag & SUFFIX_ALWAYS)
+ *obufp++ = 'l';
+ break;
+ case 'N':
+ if ((prefixes & PREFIX_FWAIT) == 0)
+ *obufp++ = 'n';
+ else
+ used_prefixes |= PREFIX_FWAIT;
+ break;
+ case 'O':
+ USED_REX (REX_MODE64);
+ if (rex & REX_MODE64)
+ *obufp++ = 'o';
+ else
+ *obufp++ = 'd';
+ break;
+ case 'T':
+ if (intel_syntax)
+ break;
+ if (mode_64bit)
+ {
+ *obufp++ = 'q';
+ break;
+ }
+ /* Fall through. */
+ case 'P':
+ if (intel_syntax)
+ break;
+ if ((prefixes & PREFIX_DATA)
+ || (rex & REX_MODE64)
+ || (sizeflag & SUFFIX_ALWAYS))
+ {
+ USED_REX (REX_MODE64);
+ if (rex & REX_MODE64)
+ *obufp++ = 'q';
+ else
+ {
+ if (sizeflag & DFLAG)
+ *obufp++ = 'l';
+ else
+ *obufp++ = 'w';
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ }
+ }
+ break;
+ case 'U':
+ if (intel_syntax)
+ break;
+ if (mode_64bit)
+ {
+ *obufp++ = 'q';
+ break;
+ }
+ /* Fall through. */
+ case 'Q':
+ if (intel_syntax && !alt)
+ break;
+ USED_REX (REX_MODE64);
+ if (mod != 3 || (sizeflag & SUFFIX_ALWAYS))
+ {
+ if (rex & REX_MODE64)
+ *obufp++ = 'q';
+ else
+ {
+ if (sizeflag & DFLAG)
+ *obufp++ = intel_syntax ? 'd' : 'l';
+ else
+ *obufp++ = 'w';
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ }
+ }
+ break;
+ case 'R':
+ USED_REX (REX_MODE64);
+ if (intel_syntax)
+ {
+ if (rex & REX_MODE64)
+ {
+ *obufp++ = 'q';
+ *obufp++ = 't';
+ }
+ else if (sizeflag & DFLAG)
+ {
+ *obufp++ = 'd';
+ *obufp++ = 'q';
+ }
+ else
+ {
+ *obufp++ = 'w';
+ *obufp++ = 'd';
+ }
+ }
+ else
+ {
+ if (rex & REX_MODE64)
+ *obufp++ = 'q';
+ else if (sizeflag & DFLAG)
+ *obufp++ = 'l';
+ else
+ *obufp++ = 'w';
+ }
+ if (!(rex & REX_MODE64))
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ break;
+ case 'S':
+ if (intel_syntax)
+ break;
+ if (sizeflag & SUFFIX_ALWAYS)
+ {
+ if (rex & REX_MODE64)
+ *obufp++ = 'q';
+ else
+ {
+ if (sizeflag & DFLAG)
+ *obufp++ = 'l';
+ else
+ *obufp++ = 'w';
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ }
+ }
+ break;
+ case 'X':
+ if (prefixes & PREFIX_DATA)
+ *obufp++ = 'd';
+ else
+ *obufp++ = 's';
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ break;
+ case 'Y':
+ if (intel_syntax)
+ break;
+ if (rex & REX_MODE64)
+ {
+ USED_REX (REX_MODE64);
+ *obufp++ = 'q';
+ }
+ break;
+ /* implicit operand size 'l' for i386 or 'q' for x86-64 */
+ case 'W':
+ /* operand size flag for cwtl, cbtw */
+ USED_REX (0);
+ if (rex)
+ *obufp++ = 'l';
+ else if (sizeflag & DFLAG)
+ *obufp++ = 'w';
+ else
+ *obufp++ = 'b';
+ if (intel_syntax)
+ {
+ if (rex)
+ {
+ *obufp++ = 'q';
+ *obufp++ = 'e';
+ }
+ if (sizeflag & DFLAG)
+ {
+ *obufp++ = 'd';
+ *obufp++ = 'e';
+ }
+ else
+ {
+ *obufp++ = 'w';
+ }
+ }
+ if (!rex)
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ break;
+ }
+ alt = 0;
+ }
+ *obufp = 0;
+ return 0;
+}
+
+static void
+oappend (const char *s)
+{
+ strcpy (obufp, s);
+ obufp += strlen (s);
+}
+
+static void
+append_seg (void)
+{
+ if (prefixes & PREFIX_CS)
+ {
+ used_prefixes |= PREFIX_CS;
+ oappend ("%cs:" + intel_syntax);
+ }
+ if (prefixes & PREFIX_DS)
+ {
+ used_prefixes |= PREFIX_DS;
+ oappend ("%ds:" + intel_syntax);
+ }
+ if (prefixes & PREFIX_SS)
+ {
+ used_prefixes |= PREFIX_SS;
+ oappend ("%ss:" + intel_syntax);
+ }
+ if (prefixes & PREFIX_ES)
+ {
+ used_prefixes |= PREFIX_ES;
+ oappend ("%es:" + intel_syntax);
+ }
+ if (prefixes & PREFIX_FS)
+ {
+ used_prefixes |= PREFIX_FS;
+ oappend ("%fs:" + intel_syntax);
+ }
+ if (prefixes & PREFIX_GS)
+ {
+ used_prefixes |= PREFIX_GS;
+ oappend ("%gs:" + intel_syntax);
+ }
+}
+
+static void
+OP_indirE (int bytemode, int sizeflag)
+{
+ if (!intel_syntax)
+ oappend ("*");
+ OP_E (bytemode, sizeflag);
+}
+
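+/* Format DISP into BUF as "0x..." hex or as signed decimal; the 64-bit
+   decimal path converts a digit at a time and special-cases
+   0x8000000000000000, whose negation overflows.  */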
+static void
+print_operand_value (char *buf, int hex, bfd_vma disp)
+{
+ if (mode_64bit)
+ {
+ if (hex)
+ {
+ char tmp[30];
+ int i;
+ buf[0] = '0';
+ buf[1] = 'x';
+ sprintf_vma (tmp, disp);
+ for (i = 0; tmp[i] == '0' && tmp[i + 1]; i++);
+ strcpy (buf + 2, tmp + i);
+ }
+ else
+ {
+ bfd_signed_vma v = disp;
+ char tmp[30];
+ int i;
+ if (v < 0)
+ {
+ *(buf++) = '-';
+ v = -disp;
+ /* Check for possible overflow on 0x8000000000000000. */
+ if (v < 0)
+ {
+ strcpy (buf, "9223372036854775808");
+ return;
+ }
+ }
+ if (!v)
+ {
+ strcpy (buf, "0");
+ return;
+ }
+
+ i = 0;
+ tmp[29] = 0;
+ while (v)
+ {
+ tmp[28 - i] = (v % 10) + '0';
+ v /= 10;
+ i++;
+ }
+ strcpy (buf, tmp + 29 - i);
+ }
+ }
+ else
+ {
+ if (hex)
+ sprintf (buf, "0x%x", (unsigned int) disp);
+ else
+ sprintf (buf, "%d", (int) disp);
+ }
+}
+
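+/* Decode the operand described by the ModR/M byte: a register when
+   mod == 3, otherwise a memory reference built from any segment
+   override, base, index, scale and displacement, in 16-bit or
+   32/64-bit address form.  */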
+static void
+OP_E (int bytemode, int sizeflag)
+{
+ bfd_vma disp;
+ int add = 0;
+ int riprel = 0;
+ USED_REX (REX_EXTZ);
+ if (rex & REX_EXTZ)
+ add += 8;
+
+ /* Skip mod/rm byte. */
+ MODRM_CHECK;
+ codep++;
+
+ if (mod == 3)
+ {
+ switch (bytemode)
+ {
+ case b_mode:
+ USED_REX (0);
+ if (rex)
+ oappend (names8rex[rm + add]);
+ else
+ oappend (names8[rm + add]);
+ break;
+ case w_mode:
+ oappend (names16[rm + add]);
+ break;
+ case d_mode:
+ oappend (names32[rm + add]);
+ break;
+ case q_mode:
+ oappend (names64[rm + add]);
+ break;
+ case m_mode:
+ if (mode_64bit)
+ oappend (names64[rm + add]);
+ else
+ oappend (names32[rm + add]);
+ break;
+ case branch_v_mode:
+ if (mode_64bit)
+ oappend (names64[rm + add]);
+ else
+ {
+ if ((sizeflag & DFLAG) || bytemode != branch_v_mode)
+ oappend (names32[rm + add]);
+ else
+ oappend (names16[rm + add]);
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ }
+ break;
+ case v_mode:
+ case dq_mode:
+ case dqw_mode:
+ USED_REX (REX_MODE64);
+ if (rex & REX_MODE64)
+ oappend (names64[rm + add]);
+ else if ((sizeflag & DFLAG) || bytemode != v_mode)
+ oappend (names32[rm + add]);
+ else
+ oappend (names16[rm + add]);
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ break;
+ case 0:
+ break;
+ default:
+ oappend (INTERNAL_DISASSEMBLER_ERROR);
+ break;
+ }
+ return;
+ }
+
+ disp = 0;
+ append_seg ();
+
+ if ((sizeflag & AFLAG) || mode_64bit) /* 32 bit address mode */
+ {
+ int havesib;
+ int havebase;
+ int base;
+ int index = 0;
+ int scale = 0;
+
+ havesib = 0;
+ havebase = 1;
+ base = rm;
+
+ if (base == 4)
+ {
+ havesib = 1;
+ FETCH_DATA (the_info, codep + 1);
+ index = (*codep >> 3) & 7;
+ if (mode_64bit || index != 0x4)
+ /* When INDEX == 0x4 in 32 bit mode, SCALE is ignored. */
+ scale = (*codep >> 6) & 3;
+ base = *codep & 7;
+ USED_REX (REX_EXTY);
+ if (rex & REX_EXTY)
+ index += 8;
+ codep++;
+ }
+ base += add;
+
+ switch (mod)
+ {
+ case 0:
+ if ((base & 7) == 5)
+ {
+ havebase = 0;
+ if (mode_64bit && !havesib)
+ riprel = 1;
+ disp = get32s ();
+ }
+ break;
+ case 1:
+ FETCH_DATA (the_info, codep + 1);
+ disp = *codep++;
+ if ((disp & 0x80) != 0)
+ disp -= 0x100;
+ break;
+ case 2:
+ disp = get32s ();
+ break;
+ }
+
+ if (!intel_syntax)
+ if (mod != 0 || (base & 7) == 5)
+ {
+ print_operand_value (scratchbuf, !riprel, disp);
+ oappend (scratchbuf);
+ if (riprel)
+ {
+ set_op (disp, 1);
+ oappend ("(%rip)");
+ }
+ }
+
+ if (havebase || (havesib && (index != 4 || scale != 0)))
+ {
+ if (intel_syntax)
+ {
+ switch (bytemode)
+ {
+ case b_mode:
+ oappend ("BYTE PTR ");
+ break;
+ case w_mode:
+ case dqw_mode:
+ oappend ("WORD PTR ");
+ break;
+ case branch_v_mode:
+ case v_mode:
+ case dq_mode:
+ USED_REX (REX_MODE64);
+ if (rex & REX_MODE64)
+ oappend ("QWORD PTR ");
+ else if ((sizeflag & DFLAG) || bytemode == dq_mode)
+ oappend ("DWORD PTR ");
+ else
+ oappend ("WORD PTR ");
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ break;
+ case d_mode:
+ oappend ("DWORD PTR ");
+ break;
+ case q_mode:
+ oappend ("QWORD PTR ");
+ break;
+ case m_mode:
+ if (mode_64bit)
+ oappend ("QWORD PTR ");
+ else
+ oappend ("DWORD PTR ");
+ break;
+ case f_mode:
+ if (sizeflag & DFLAG)
+ {
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ oappend ("FWORD PTR ");
+ }
+ else
+ oappend ("DWORD PTR ");
+ break;
+ case t_mode:
+ oappend ("TBYTE PTR ");
+ break;
+ case x_mode:
+ oappend ("XMMWORD PTR ");
+ break;
+ default:
+ break;
+ }
+ }
+ *obufp++ = open_char;
+ if (intel_syntax && riprel)
+ oappend ("rip + ");
+ *obufp = '\0';
+ if (havebase)
+ oappend (mode_64bit && (sizeflag & AFLAG)
+ ? names64[base] : names32[base]);
+ if (havesib)
+ {
+ if (index != 4)
+ {
+ if (!intel_syntax || havebase)
+ {
+ *obufp++ = separator_char;
+ *obufp = '\0';
+ }
+ oappend (mode_64bit && (sizeflag & AFLAG)
+ ? names64[index] : names32[index]);
+ }
+ if (scale != 0 || (!intel_syntax && index != 4))
+ {
+ *obufp++ = scale_char;
+ *obufp = '\0';
+ sprintf (scratchbuf, "%d", 1 << scale);
+ oappend (scratchbuf);
+ }
+ }
+ if (intel_syntax && disp)
+ {
+ if ((bfd_signed_vma) disp > 0)
+ {
+ *obufp++ = '+';
+ *obufp = '\0';
+ }
+ else if (mod != 1)
+ {
+ *obufp++ = '-';
+ *obufp = '\0';
+ disp = - (bfd_signed_vma) disp;
+ }
+
+ print_operand_value (scratchbuf, mod != 1, disp);
+ oappend (scratchbuf);
+ }
+
+ *obufp++ = close_char;
+ *obufp = '\0';
+ }
+ else if (intel_syntax)
+ {
+ if (mod != 0 || (base & 7) == 5)
+ {
+ if (prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS
+ | PREFIX_ES | PREFIX_FS | PREFIX_GS))
+ ;
+ else
+ {
+ oappend (names_seg[ds_reg - es_reg]);
+ oappend (":");
+ }
+ print_operand_value (scratchbuf, 1, disp);
+ oappend (scratchbuf);
+ }
+ }
+ }
+ else
+ { /* 16 bit address mode */
+ switch (mod)
+ {
+ case 0:
+ if (rm == 6)
+ {
+ disp = get16 ();
+ if ((disp & 0x8000) != 0)
+ disp -= 0x10000;
+ }
+ break;
+ case 1:
+ FETCH_DATA (the_info, codep + 1);
+ disp = *codep++;
+ if ((disp & 0x80) != 0)
+ disp -= 0x100;
+ break;
+ case 2:
+ disp = get16 ();
+ if ((disp & 0x8000) != 0)
+ disp -= 0x10000;
+ break;
+ }
+
+ if (!intel_syntax)
+ if (mod != 0 || rm == 6)
+ {
+ print_operand_value (scratchbuf, 0, disp);
+ oappend (scratchbuf);
+ }
+
+ if (mod != 0 || rm != 6)
+ {
+ *obufp++ = open_char;
+ *obufp = '\0';
+ oappend (index16[rm]);
+ if (intel_syntax && disp)
+ {
+ if ((bfd_signed_vma) disp > 0)
+ {
+ *obufp++ = '+';
+ *obufp = '\0';
+ }
+ else if (mod != 1)
+ {
+ *obufp++ = '-';
+ *obufp = '\0';
+ disp = - (bfd_signed_vma) disp;
+ }
+
+ print_operand_value (scratchbuf, mod != 1, disp);
+ oappend (scratchbuf);
+ }
+
+ *obufp++ = close_char;
+ *obufp = '\0';
+ }
+ else if (intel_syntax)
+ {
+ if (prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS
+ | PREFIX_ES | PREFIX_FS | PREFIX_GS))
+ ;
+ else
+ {
+ oappend (names_seg[ds_reg - es_reg]);
+ oappend (":");
+ }
+ print_operand_value (scratchbuf, 1, disp & 0xffff);
+ oappend (scratchbuf);
+ }
+ }
+}
+
+static void
+OP_G (int bytemode, int sizeflag)
+{
+ int add = 0;
+ USED_REX (REX_EXTX);
+ if (rex & REX_EXTX)
+ add += 8;
+ switch (bytemode)
+ {
+ case b_mode:
+ USED_REX (0);
+ if (rex)
+ oappend (names8rex[reg + add]);
+ else
+ oappend (names8[reg + add]);
+ break;
+ case w_mode:
+ oappend (names16[reg + add]);
+ break;
+ case d_mode:
+ oappend (names32[reg + add]);
+ break;
+ case q_mode:
+ oappend (names64[reg + add]);
+ break;
+ case v_mode:
+ case dq_mode:
+ case dqw_mode:
+ USED_REX (REX_MODE64);
+ if (rex & REX_MODE64)
+ oappend (names64[reg + add]);
+ else if ((sizeflag & DFLAG) || bytemode != v_mode)
+ oappend (names32[reg + add]);
+ else
+ oappend (names16[reg + add]);
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ break;
+ case m_mode:
+ if (mode_64bit)
+ oappend (names64[reg + add]);
+ else
+ oappend (names32[reg + add]);
+ break;
+ default:
+ oappend (INTERNAL_DISASSEMBLER_ERROR);
+ break;
+ }
+}
+
+static bfd_vma
+get64 (void)
+{
+ bfd_vma x;
+#ifdef BFD64
+ unsigned int a;
+ unsigned int b;
+
+ FETCH_DATA (the_info, codep + 8);
+ a = *codep++ & 0xff;
+ a |= (*codep++ & 0xff) << 8;
+ a |= (*codep++ & 0xff) << 16;
+ a |= (*codep++ & 0xff) << 24;
+ b = *codep++ & 0xff;
+ b |= (*codep++ & 0xff) << 8;
+ b |= (*codep++ & 0xff) << 16;
+ b |= (*codep++ & 0xff) << 24;
+ x = a + ((bfd_vma) b << 32);
+#else
+ abort ();
+ x = 0;
+#endif
+ return x;
+}
+
+static bfd_signed_vma
+get32 (void)
+{
+ bfd_signed_vma x = 0;
+
+ FETCH_DATA (the_info, codep + 4);
+ x = *codep++ & (bfd_signed_vma) 0xff;
+ x |= (*codep++ & (bfd_signed_vma) 0xff) << 8;
+ x |= (*codep++ & (bfd_signed_vma) 0xff) << 16;
+ x |= (*codep++ & (bfd_signed_vma) 0xff) << 24;
+ return x;
+}
+
+static bfd_signed_vma
+get32s (void)
+{
+ bfd_signed_vma x = 0;
+
+ FETCH_DATA (the_info, codep + 4);
+ x = *codep++ & (bfd_signed_vma) 0xff;
+ x |= (*codep++ & (bfd_signed_vma) 0xff) << 8;
+ x |= (*codep++ & (bfd_signed_vma) 0xff) << 16;
+ x |= (*codep++ & (bfd_signed_vma) 0xff) << 24;
+
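+  /* Sign-extend from 32 bits: flipping bit 31 and then subtracting
+     1 << 31 maps values with bit 31 set to their negative two's
+     complement equivalents.  */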
+ x = (x ^ ((bfd_signed_vma) 1 << 31)) - ((bfd_signed_vma) 1 << 31);
+
+ return x;
+}
+
+static int
+get16 (void)
+{
+ int x = 0;
+
+ FETCH_DATA (the_info, codep + 2);
+ x = *codep++ & 0xff;
+ x |= (*codep++ & 0xff) << 8;
+ return x;
+}
+
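+/* Record a decoded operand address (and whether it is %rip-relative)
+   in slot op_ad; print_insn later prints it via
+   info->print_address_func, adding the instruction length first for
+   %rip-relative operands.  */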
+static void
+set_op (bfd_vma op, int riprel)
+{
+ op_index[op_ad] = op_ad;
+ if (mode_64bit)
+ {
+ op_address[op_ad] = op;
+ op_riprel[op_ad] = riprel;
+ }
+ else
+ {
+ /* Mask to get a 32-bit address. */
+ op_address[op_ad] = op & 0xffffffff;
+ op_riprel[op_ad] = riprel & 0xffffffff;
+ }
+}
+
+static void
+OP_REG (int code, int sizeflag)
+{
+ const char *s;
+ int add = 0;
+ USED_REX (REX_EXTZ);
+ if (rex & REX_EXTZ)
+ add = 8;
+
+ switch (code)
+ {
+ case indir_dx_reg:
+ if (intel_syntax)
+ s = "[dx]";
+ else
+ s = "(%dx)";
+ break;
+ case ax_reg: case cx_reg: case dx_reg: case bx_reg:
+ case sp_reg: case bp_reg: case si_reg: case di_reg:
+ s = names16[code - ax_reg + add];
+ break;
+ case es_reg: case ss_reg: case cs_reg:
+ case ds_reg: case fs_reg: case gs_reg:
+ s = names_seg[code - es_reg + add];
+ break;
+ case al_reg: case ah_reg: case cl_reg: case ch_reg:
+ case dl_reg: case dh_reg: case bl_reg: case bh_reg:
+ USED_REX (0);
+ if (rex)
+ s = names8rex[code - al_reg + add];
+ else
+ s = names8[code - al_reg];
+ break;
+ case rAX_reg: case rCX_reg: case rDX_reg: case rBX_reg:
+ case rSP_reg: case rBP_reg: case rSI_reg: case rDI_reg:
+ if (mode_64bit)
+ {
+ s = names64[code - rAX_reg + add];
+ break;
+ }
+ code += eAX_reg - rAX_reg;
+ /* Fall through. */
+ case eAX_reg: case eCX_reg: case eDX_reg: case eBX_reg:
+ case eSP_reg: case eBP_reg: case eSI_reg: case eDI_reg:
+ USED_REX (REX_MODE64);
+ if (rex & REX_MODE64)
+ s = names64[code - eAX_reg + add];
+ else if (sizeflag & DFLAG)
+ s = names32[code - eAX_reg + add];
+ else
+ s = names16[code - eAX_reg + add];
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ break;
+ default:
+ s = INTERNAL_DISASSEMBLER_ERROR;
+ break;
+ }
+ oappend (s);
+}
+
+static void
+OP_IMREG (int code, int sizeflag)
+{
+ const char *s;
+
+ switch (code)
+ {
+ case indir_dx_reg:
+ if (intel_syntax)
+ s = "[dx]";
+ else
+ s = "(%dx)";
+ break;
+ case ax_reg: case cx_reg: case dx_reg: case bx_reg:
+ case sp_reg: case bp_reg: case si_reg: case di_reg:
+ s = names16[code - ax_reg];
+ break;
+ case es_reg: case ss_reg: case cs_reg:
+ case ds_reg: case fs_reg: case gs_reg:
+ s = names_seg[code - es_reg];
+ break;
+ case al_reg: case ah_reg: case cl_reg: case ch_reg:
+ case dl_reg: case dh_reg: case bl_reg: case bh_reg:
+ USED_REX (0);
+ if (rex)
+ s = names8rex[code - al_reg];
+ else
+ s = names8[code - al_reg];
+ break;
+ case eAX_reg: case eCX_reg: case eDX_reg: case eBX_reg:
+ case eSP_reg: case eBP_reg: case eSI_reg: case eDI_reg:
+ USED_REX (REX_MODE64);
+ if (rex & REX_MODE64)
+ s = names64[code - eAX_reg];
+ else if (sizeflag & DFLAG)
+ s = names32[code - eAX_reg];
+ else
+ s = names16[code - eAX_reg];
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ break;
+ default:
+ s = INTERNAL_DISASSEMBLER_ERROR;
+ break;
+ }
+ oappend (s);
+}
+
+static void
+OP_I (int bytemode, int sizeflag)
+{
+ bfd_signed_vma op;
+ bfd_signed_vma mask = -1;
+
+ switch (bytemode)
+ {
+ case b_mode:
+ FETCH_DATA (the_info, codep + 1);
+ op = *codep++;
+ mask = 0xff;
+ break;
+ case q_mode:
+ if (mode_64bit)
+ {
+ op = get32s ();
+ break;
+ }
+ /* Fall through. */
+ case v_mode:
+ USED_REX (REX_MODE64);
+ if (rex & REX_MODE64)
+ op = get32s ();
+ else if (sizeflag & DFLAG)
+ {
+ op = get32 ();
+ mask = 0xffffffff;
+ }
+ else
+ {
+ op = get16 ();
+ mask = 0xffff;
+ }
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ break;
+ case w_mode:
+ mask = 0xffff;
+ op = get16 ();
+ break;
+ case const_1_mode:
+ if (intel_syntax)
+ oappend ("1");
+ return;
+ default:
+ oappend (INTERNAL_DISASSEMBLER_ERROR);
+ return;
+ }
+
+ op &= mask;
+ scratchbuf[0] = '$';
+ print_operand_value (scratchbuf + 1, 1, op);
+ oappend (scratchbuf + intel_syntax);
+ scratchbuf[0] = '\0';
+}
+
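+/* 64-bit variant of OP_I: with a REX.W prefix a full 8-byte immediate
+   is fetched (the mov imm64 forms).  Outside 64-bit mode this simply
+   defers to OP_I. */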
+static void
+OP_I64 (int bytemode, int sizeflag)
+{
+ bfd_signed_vma op;
+ bfd_signed_vma mask = -1;
+
+ if (!mode_64bit)
+ {
+ OP_I (bytemode, sizeflag);
+ return;
+ }
+
+ switch (bytemode)
+ {
+ case b_mode:
+ FETCH_DATA (the_info, codep + 1);
+ op = *codep++;
+ mask = 0xff;
+ break;
+ case v_mode:
+ USED_REX (REX_MODE64);
+ if (rex & REX_MODE64)
+ op = get64 ();
+ else if (sizeflag & DFLAG)
+ {
+ op = get32 ();
+ mask = 0xffffffff;
+ }
+ else
+ {
+ op = get16 ();
+	  mask = 0xffff;
+ }
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ break;
+ case w_mode:
+      mask = 0xffff;
+ op = get16 ();
+ break;
+ default:
+ oappend (INTERNAL_DISASSEMBLER_ERROR);
+ return;
+ }
+
+ op &= mask;
+ scratchbuf[0] = '$';
+ print_operand_value (scratchbuf + 1, 1, op);
+ oappend (scratchbuf + intel_syntax);
+ scratchbuf[0] = '\0';
+}
+
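+/* Fetch and print a sign-extended immediate operand. */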
+static void
+OP_sI (int bytemode, int sizeflag)
+{
+ bfd_signed_vma op;
+ bfd_signed_vma mask = -1;
+
+ switch (bytemode)
+ {
+ case b_mode:
+ FETCH_DATA (the_info, codep + 1);
+ op = *codep++;
+ if ((op & 0x80) != 0)
+ op -= 0x100;
+ mask = 0xffffffff;
+ break;
+ case v_mode:
+ USED_REX (REX_MODE64);
+ if (rex & REX_MODE64)
+ op = get32s ();
+ else if (sizeflag & DFLAG)
+ {
+ op = get32s ();
+ mask = 0xffffffff;
+ }
+ else
+ {
+ mask = 0xffffffff;
+ op = get16 ();
+ if ((op & 0x8000) != 0)
+ op -= 0x10000;
+ }
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ break;
+ case w_mode:
+ op = get16 ();
+ mask = 0xffffffff;
+ if ((op & 0x8000) != 0)
+ op -= 0x10000;
+ break;
+ default:
+ oappend (INTERNAL_DISASSEMBLER_ERROR);
+ return;
+ }
+
+ scratchbuf[0] = '$';
+ print_operand_value (scratchbuf + 1, 1, op);
+ oappend (scratchbuf + intel_syntax);
+}
+
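+/* Decode a self-relative jump/call displacement and print the
+   resulting target address, computed from the end of the instruction. */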
+static void
+OP_J (int bytemode, int sizeflag)
+{
+ bfd_vma disp;
+ bfd_vma mask = -1;
+
+ switch (bytemode)
+ {
+ case b_mode:
+ FETCH_DATA (the_info, codep + 1);
+ disp = *codep++;
+ if ((disp & 0x80) != 0)
+ disp -= 0x100;
+ break;
+ case v_mode:
+ if (sizeflag & DFLAG)
+ disp = get32s ();
+ else
+ {
+ disp = get16 ();
+ /* For some reason, a data16 prefix on a jump instruction
+ means that the pc is masked to 16 bits after the
+ displacement is added! */
+ mask = 0xffff;
+ }
+ break;
+ default:
+ oappend (INTERNAL_DISASSEMBLER_ERROR);
+ return;
+ }
+ disp = (start_pc + codep - start_codep + disp) & mask;
+ set_op (disp, 0);
+ print_operand_value (scratchbuf, 1, disp);
+ oappend (scratchbuf);
+}
+
+static void
+OP_SEG (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
+{
+ oappend (names_seg[reg]);
+}
+
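+/* Decode a direct far pointer operand (ljmp/lcall segment:offset). */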
+static void
+OP_DIR (int dummy ATTRIBUTE_UNUSED, int sizeflag)
+{
+ int seg, offset;
+
+ if (sizeflag & DFLAG)
+ {
+ offset = get32 ();
+ seg = get16 ();
+ }
+ else
+ {
+ offset = get16 ();
+ seg = get16 ();
+ }
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ if (intel_syntax)
+ sprintf (scratchbuf, "0x%x,0x%x", seg, offset);
+ else
+ sprintf (scratchbuf, "$0x%x,$0x%x", seg, offset);
+ oappend (scratchbuf);
+}
+
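+/* Decode an absolute memory offset operand (the moffs forms of mov),
+   printing any segment override first. */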
+static void
+OP_OFF (int bytemode ATTRIBUTE_UNUSED, int sizeflag)
+{
+ bfd_vma off;
+
+ append_seg ();
+
+ if ((sizeflag & AFLAG) || mode_64bit)
+ off = get32 ();
+ else
+ off = get16 ();
+
+ if (intel_syntax)
+ {
+ if (!(prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS
+ | PREFIX_ES | PREFIX_FS | PREFIX_GS)))
+ {
+ oappend (names_seg[ds_reg - es_reg]);
+ oappend (":");
+ }
+ }
+ print_operand_value (scratchbuf, 1, off);
+ oappend (scratchbuf);
+}
+
+static void
+OP_OFF64 (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
+{
+ bfd_vma off;
+
+ if (!mode_64bit)
+ {
+ OP_OFF (bytemode, sizeflag);
+ return;
+ }
+
+ append_seg ();
+
+ off = get64 ();
+
+ if (intel_syntax)
+ {
+ if (!(prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS
+ | PREFIX_ES | PREFIX_FS | PREFIX_GS)))
+ {
+ oappend (names_seg[ds_reg - es_reg]);
+ oappend (":");
+ }
+ }
+ print_operand_value (scratchbuf, 1, off);
+ oappend (scratchbuf);
+}
+
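+/* Print an implicit pointer register such as (%esi) or (%edi),
+   honoring the current address-size attribute. */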
+static void
+ptr_reg (int code, int sizeflag)
+{
+ const char *s;
+
+ *obufp++ = open_char;
+ used_prefixes |= (prefixes & PREFIX_ADDR);
+ if (mode_64bit)
+ {
+ if (!(sizeflag & AFLAG))
+ s = names32[code - eAX_reg];
+ else
+ s = names64[code - eAX_reg];
+ }
+ else if (sizeflag & AFLAG)
+ s = names32[code - eAX_reg];
+ else
+ s = names16[code - eAX_reg];
+ oappend (s);
+ *obufp++ = close_char;
+ *obufp = 0;
+}
+
+static void
+OP_ESreg (int code, int sizeflag)
+{
+ if (intel_syntax)
+ {
+ if (codep[-1] & 1)
+ {
+ USED_REX (REX_MODE64);
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ if (rex & REX_MODE64)
+ oappend ("QWORD PTR ");
+ else if ((sizeflag & DFLAG))
+ oappend ("DWORD PTR ");
+ else
+ oappend ("WORD PTR ");
+ }
+ else
+ oappend ("BYTE PTR ");
+ }
+
+ oappend ("%es:" + intel_syntax);
+ ptr_reg (code, sizeflag);
+}
+
+static void
+OP_DSreg (int code, int sizeflag)
+{
+ if (intel_syntax)
+ {
+ if (codep[-1] != 0xd7 && (codep[-1] & 1))
+ {
+ USED_REX (REX_MODE64);
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ if (rex & REX_MODE64)
+ oappend ("QWORD PTR ");
+ else if ((sizeflag & DFLAG))
+ oappend ("DWORD PTR ");
+ else
+ oappend ("WORD PTR ");
+ }
+ else
+ oappend ("BYTE PTR ");
+ }
+
+ if ((prefixes
+ & (PREFIX_CS
+ | PREFIX_DS
+ | PREFIX_SS
+ | PREFIX_ES
+ | PREFIX_FS
+ | PREFIX_GS)) == 0)
+ prefixes |= PREFIX_DS;
+ append_seg ();
+ ptr_reg (code, sizeflag);
+}
+
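+/* Print a control register operand.  REX.R selects %cr8-%cr15; outside
+   64-bit mode a lock prefix is AMD's alternative encoding of %cr8. */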
+static void
+OP_C (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
+{
+ int add = 0;
+ if (rex & REX_EXTX)
+ {
+ USED_REX (REX_EXTX);
+ add = 8;
+ }
+ else if (!mode_64bit && (prefixes & PREFIX_LOCK))
+ {
+ used_prefixes |= PREFIX_LOCK;
+ add = 8;
+ }
+ sprintf (scratchbuf, "%%cr%d", reg + add);
+ oappend (scratchbuf + intel_syntax);
+}
+
+static void
+OP_D (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
+{
+ int add = 0;
+ USED_REX (REX_EXTX);
+ if (rex & REX_EXTX)
+ add = 8;
+ if (intel_syntax)
+ sprintf (scratchbuf, "db%d", reg + add);
+ else
+ sprintf (scratchbuf, "%%db%d", reg + add);
+ oappend (scratchbuf);
+}
+
+static void
+OP_T (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
+{
+ sprintf (scratchbuf, "%%tr%d", reg);
+ oappend (scratchbuf + intel_syntax);
+}
+
+static void
+OP_Rd (int bytemode, int sizeflag)
+{
+ if (mod == 3)
+ OP_E (bytemode, sizeflag);
+ else
+ BadOp ();
+}
+
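+/* Print an MMX register operand, or an XMM register when a data-size
+   prefix promotes the instruction to its SSE form. */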
+static void
+OP_MMX (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
+{
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ if (prefixes & PREFIX_DATA)
+ {
+ int add = 0;
+ USED_REX (REX_EXTX);
+ if (rex & REX_EXTX)
+ add = 8;
+ sprintf (scratchbuf, "%%xmm%d", reg + add);
+ }
+ else
+ sprintf (scratchbuf, "%%mm%d", reg);
+ oappend (scratchbuf + intel_syntax);
+}
+
+static void
+OP_XMM (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
+{
+ int add = 0;
+ USED_REX (REX_EXTX);
+ if (rex & REX_EXTX)
+ add = 8;
+ sprintf (scratchbuf, "%%xmm%d", reg + add);
+ oappend (scratchbuf + intel_syntax);
+}
+
+static void
+OP_EM (int bytemode, int sizeflag)
+{
+ if (mod != 3)
+ {
+ if (intel_syntax && bytemode == v_mode)
+ {
+ bytemode = (prefixes & PREFIX_DATA) ? x_mode : q_mode;
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ }
+ OP_E (bytemode, sizeflag);
+ return;
+ }
+
+ /* Skip mod/rm byte. */
+ MODRM_CHECK;
+ codep++;
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ if (prefixes & PREFIX_DATA)
+ {
+ int add = 0;
+
+ USED_REX (REX_EXTZ);
+ if (rex & REX_EXTZ)
+ add = 8;
+ sprintf (scratchbuf, "%%xmm%d", rm + add);
+ }
+ else
+ sprintf (scratchbuf, "%%mm%d", rm);
+ oappend (scratchbuf + intel_syntax);
+}
+
+static void
+OP_EX (int bytemode, int sizeflag)
+{
+ int add = 0;
+ if (mod != 3)
+ {
+ if (intel_syntax && bytemode == v_mode)
+ {
+ switch (prefixes & (PREFIX_DATA|PREFIX_REPZ|PREFIX_REPNZ))
+ {
+ case 0: bytemode = x_mode; break;
+ case PREFIX_REPZ: bytemode = d_mode; used_prefixes |= PREFIX_REPZ; break;
+ case PREFIX_DATA: bytemode = x_mode; used_prefixes |= PREFIX_DATA; break;
+ case PREFIX_REPNZ: bytemode = q_mode; used_prefixes |= PREFIX_REPNZ; break;
+ default: bytemode = 0; break;
+ }
+ }
+ OP_E (bytemode, sizeflag);
+ return;
+ }
+ USED_REX (REX_EXTZ);
+ if (rex & REX_EXTZ)
+ add = 8;
+
+ /* Skip mod/rm byte. */
+ MODRM_CHECK;
+ codep++;
+ sprintf (scratchbuf, "%%xmm%d", rm + add);
+ oappend (scratchbuf + intel_syntax);
+}
+
+static void
+OP_MS (int bytemode, int sizeflag)
+{
+ if (mod == 3)
+ OP_EM (bytemode, sizeflag);
+ else
+ BadOp ();
+}
+
+static void
+OP_XS (int bytemode, int sizeflag)
+{
+ if (mod == 3)
+ OP_EX (bytemode, sizeflag);
+ else
+ BadOp ();
+}
+
+static void
+OP_M (int bytemode, int sizeflag)
+{
+ if (mod == 3)
+ BadOp (); /* bad lea,lds,les,lfs,lgs,lss modrm */
+ else
+ OP_E (bytemode, sizeflag);
+}
+
+static void
+OP_0f07 (int bytemode, int sizeflag)
+{
+ if (mod != 3 || rm != 0)
+ BadOp ();
+ else
+ OP_E (bytemode, sizeflag);
+}
+
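+/* Handle the 0f ae group: the register forms of the xrstor and
+   clflush slots are really lfence and sfence. */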
+static void
+OP_0fae (int bytemode, int sizeflag)
+{
+ if (mod == 3)
+ {
+ if (reg == 5)
+ strcpy (obuf + strlen (obuf) - sizeof ("xrstor") + 1, "lfence");
+ if (reg == 7)
+ strcpy (obuf + strlen (obuf) - sizeof ("clflush") + 1, "sfence");
+
+ if (reg < 5 || rm != 0)
+ {
+ BadOp (); /* bad sfence, mfence, or lfence */
+ return;
+ }
+ }
+ else if (reg != 5 && reg != 7)
+ {
+ BadOp (); /* bad xrstor or clflush */
+ return;
+ }
+
+ OP_E (bytemode, sizeflag);
+}
+
+static void
+NOP_Fixup (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
+{
+ /* NOP with REPZ prefix is called PAUSE. */
+ if (prefixes == PREFIX_REPZ)
+ strcpy (obuf, "pause");
+}
+
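+/* Mnemonic suffixes for the AMD 3DNow! opcodes (0f 0f ... suffix),
+   indexed by the trailing opcode byte. */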
+static const char *const Suffix3DNow[] = {
+/* 00 */ NULL, NULL, NULL, NULL,
+/* 04 */ NULL, NULL, NULL, NULL,
+/* 08 */ NULL, NULL, NULL, NULL,
+/* 0C */ "pi2fw", "pi2fd", NULL, NULL,
+/* 10 */ NULL, NULL, NULL, NULL,
+/* 14 */ NULL, NULL, NULL, NULL,
+/* 18 */ NULL, NULL, NULL, NULL,
+/* 1C */ "pf2iw", "pf2id", NULL, NULL,
+/* 20 */ NULL, NULL, NULL, NULL,
+/* 24 */ NULL, NULL, NULL, NULL,
+/* 28 */ NULL, NULL, NULL, NULL,
+/* 2C */ NULL, NULL, NULL, NULL,
+/* 30 */ NULL, NULL, NULL, NULL,
+/* 34 */ NULL, NULL, NULL, NULL,
+/* 38 */ NULL, NULL, NULL, NULL,
+/* 3C */ NULL, NULL, NULL, NULL,
+/* 40 */ NULL, NULL, NULL, NULL,
+/* 44 */ NULL, NULL, NULL, NULL,
+/* 48 */ NULL, NULL, NULL, NULL,
+/* 4C */ NULL, NULL, NULL, NULL,
+/* 50 */ NULL, NULL, NULL, NULL,
+/* 54 */ NULL, NULL, NULL, NULL,
+/* 58 */ NULL, NULL, NULL, NULL,
+/* 5C */ NULL, NULL, NULL, NULL,
+/* 60 */ NULL, NULL, NULL, NULL,
+/* 64 */ NULL, NULL, NULL, NULL,
+/* 68 */ NULL, NULL, NULL, NULL,
+/* 6C */ NULL, NULL, NULL, NULL,
+/* 70 */ NULL, NULL, NULL, NULL,
+/* 74 */ NULL, NULL, NULL, NULL,
+/* 78 */ NULL, NULL, NULL, NULL,
+/* 7C */ NULL, NULL, NULL, NULL,
+/* 80 */ NULL, NULL, NULL, NULL,
+/* 84 */ NULL, NULL, NULL, NULL,
+/* 88 */ NULL, NULL, "pfnacc", NULL,
+/* 8C */ NULL, NULL, "pfpnacc", NULL,
+/* 90 */ "pfcmpge", NULL, NULL, NULL,
+/* 94 */ "pfmin", NULL, "pfrcp", "pfrsqrt",
+/* 98 */ NULL, NULL, "pfsub", NULL,
+/* 9C */ NULL, NULL, "pfadd", NULL,
+/* A0 */ "pfcmpgt", NULL, NULL, NULL,
+/* A4 */ "pfmax", NULL, "pfrcpit1", "pfrsqit1",
+/* A8 */ NULL, NULL, "pfsubr", NULL,
+/* AC */ NULL, NULL, "pfacc", NULL,
+/* B0 */ "pfcmpeq", NULL, NULL, NULL,
+/* B4 */ "pfmul", NULL, "pfrcpit2", "pfmulhrw",
+/* B8 */ NULL, NULL, NULL, "pswapd",
+/* BC */ NULL, NULL, NULL, "pavgusb",
+/* C0 */ NULL, NULL, NULL, NULL,
+/* C4 */ NULL, NULL, NULL, NULL,
+/* C8 */ NULL, NULL, NULL, NULL,
+/* CC */ NULL, NULL, NULL, NULL,
+/* D0 */ NULL, NULL, NULL, NULL,
+/* D4 */ NULL, NULL, NULL, NULL,
+/* D8 */ NULL, NULL, NULL, NULL,
+/* DC */ NULL, NULL, NULL, NULL,
+/* E0 */ NULL, NULL, NULL, NULL,
+/* E4 */ NULL, NULL, NULL, NULL,
+/* E8 */ NULL, NULL, NULL, NULL,
+/* EC */ NULL, NULL, NULL, NULL,
+/* F0 */ NULL, NULL, NULL, NULL,
+/* F4 */ NULL, NULL, NULL, NULL,
+/* F8 */ NULL, NULL, NULL, NULL,
+/* FC */ NULL, NULL, NULL, NULL,
+};
+
+static void
+OP_3DNowSuffix (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
+{
+ const char *mnemonic;
+
+ FETCH_DATA (the_info, codep + 1);
+ /* AMD 3DNow! instructions are specified by an opcode suffix in the
+     place where an 8-bit immediate would normally go, i.e. the last
+ byte of the instruction. */
+ obufp = obuf + strlen (obuf);
+ mnemonic = Suffix3DNow[*codep++ & 0xff];
+ if (mnemonic)
+ oappend (mnemonic);
+ else
+ {
+ /* Since a variable sized modrm/sib chunk is between the start
+ of the opcode (0x0f0f) and the opcode suffix, we need to do
+ all the modrm processing first, and don't know until now that
+ we have a bad opcode. This necessitates some cleaning up. */
+ op1out[0] = '\0';
+ op2out[0] = '\0';
+ BadOp ();
+ }
+}
+
+static const char *simd_cmp_op[] = {
+ "eq",
+ "lt",
+ "le",
+ "unord",
+ "neq",
+ "nlt",
+ "nle",
+ "ord"
+};
+
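+/* Build the cmpps/cmpss/cmppd/cmpsd pseudo-mnemonic from the SSE
+   compare-predicate immediate byte. */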
+static void
+OP_SIMD_Suffix (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
+{
+ unsigned int cmp_type;
+
+ FETCH_DATA (the_info, codep + 1);
+ obufp = obuf + strlen (obuf);
+ cmp_type = *codep++ & 0xff;
+ if (cmp_type < 8)
+ {
+ char suffix1 = 'p', suffix2 = 's';
+ used_prefixes |= (prefixes & PREFIX_REPZ);
+ if (prefixes & PREFIX_REPZ)
+ suffix1 = 's';
+ else
+ {
+ used_prefixes |= (prefixes & PREFIX_DATA);
+ if (prefixes & PREFIX_DATA)
+ suffix2 = 'd';
+ else
+ {
+ used_prefixes |= (prefixes & PREFIX_REPNZ);
+ if (prefixes & PREFIX_REPNZ)
+ suffix1 = 's', suffix2 = 'd';
+ }
+ }
+ sprintf (scratchbuf, "cmp%s%c%c",
+ simd_cmp_op[cmp_type], suffix1, suffix2);
+ used_prefixes |= (prefixes & PREFIX_REPZ);
+ oappend (scratchbuf);
+ }
+ else
+ {
+ /* We have a bad extension byte. Clean up. */
+ op1out[0] = '\0';
+ op2out[0] = '\0';
+ BadOp ();
+ }
+}
+
+static void
+SIMD_Fixup (int extrachar, int sizeflag ATTRIBUTE_UNUSED)
+{
+  /* Change movlps/movhps to movhlps/movlhps for the 2-register operand
+     forms of these instructions. */
+ if (mod == 3)
+ {
+ char *p = obuf + strlen (obuf);
+ *(p + 1) = '\0';
+ *p = *(p - 1);
+ *(p - 1) = *(p - 2);
+ *(p - 2) = *(p - 3);
+ *(p - 3) = extrachar;
+ }
+}
+
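+/* Rewrite the register forms of 0f 01 /1 as the SSE3 monitor/mwait
+   instructions and fill in their implicit operands. */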
+static void
+PNI_Fixup (int extrachar ATTRIBUTE_UNUSED, int sizeflag)
+{
+ if (mod == 3 && reg == 1 && rm <= 1)
+ {
+ /* Override "sidt". */
+ char *p = obuf + strlen (obuf) - 4;
+
+ /* We might have a suffix when disassembling with -Msuffix. */
+ if (*p == 'i')
+ --p;
+
+ if (rm)
+ {
+ /* mwait %eax,%ecx */
+ strcpy (p, "mwait");
+ if (!intel_syntax)
+ strcpy (op1out, names32[0]);
+ }
+ else
+ {
+	  /* monitor %eax,%ecx,%edx */
+ strcpy (p, "monitor");
+ if (!intel_syntax)
+ {
+ if (!mode_64bit)
+ strcpy (op1out, names32[0]);
+ else if (!(prefixes & PREFIX_ADDR))
+ strcpy (op1out, names64[0]);
+ else
+ {
+ strcpy (op1out, names32[0]);
+ used_prefixes |= PREFIX_ADDR;
+ }
+ strcpy (op3out, names32[2]);
+ }
+ }
+ if (!intel_syntax)
+ {
+ strcpy (op2out, names32[1]);
+ two_source_ops = 1;
+ }
+
+ codep++;
+ }
+ else
+ OP_M (0, sizeflag);
+}
+
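+/* Rewrite the register forms of 0f 01 /3 as the AMD SVM instructions
+   (vmrun ... invlpga), which share an encoding with lidt. */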
+static void
+SVME_Fixup (int bytemode, int sizeflag)
+{
+ const char *alt;
+ char *p;
+
+ switch (*codep)
+ {
+ case 0xd8:
+ alt = "vmrun";
+ break;
+ case 0xd9:
+ alt = "vmmcall";
+ break;
+ case 0xda:
+ alt = "vmload";
+ break;
+ case 0xdb:
+ alt = "vmsave";
+ break;
+ case 0xdc:
+ alt = "stgi";
+ break;
+ case 0xdd:
+ alt = "clgi";
+ break;
+ case 0xde:
+ alt = "skinit";
+ break;
+ case 0xdf:
+ alt = "invlpga";
+ break;
+ default:
+ OP_M (bytemode, sizeflag);
+ return;
+ }
+ /* Override "lidt". */
+ p = obuf + strlen (obuf) - 4;
+ /* We might have a suffix. */
+ if (*p == 'i')
+ --p;
+ strcpy (p, alt);
+ if (!(prefixes & PREFIX_ADDR))
+ {
+ ++codep;
+ return;
+ }
+ used_prefixes |= PREFIX_ADDR;
+ switch (*codep++)
+ {
+ case 0xdf:
+ strcpy (op2out, names32[1]);
+ two_source_ops = 1;
+ /* Fall through. */
+ case 0xd8:
+ case 0xda:
+ case 0xdb:
+ *obufp++ = open_char;
+ if (mode_64bit || (sizeflag & AFLAG))
+ alt = names32[0];
+ else
+ alt = names16[0];
+ strcpy (obufp, alt);
+ obufp += strlen (alt);
+ *obufp++ = close_char;
+ *obufp = '\0';
+ break;
+ }
+}
+
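+/* Rewrite the register forms 0f 01 f8/f9 as swapgs/rdtscp, which
+   share an encoding with invlpg. */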
+static void
+INVLPG_Fixup (int bytemode, int sizeflag)
+{
+ const char *alt;
+
+ switch (*codep)
+ {
+ case 0xf8:
+ alt = "swapgs";
+ break;
+ case 0xf9:
+ alt = "rdtscp";
+ break;
+ default:
+ OP_M (bytemode, sizeflag);
+ return;
+ }
+ /* Override "invlpg". */
+ strcpy (obuf + strlen (obuf) - 6, alt);
+ codep++;
+}
+
+static void
+BadOp (void)
+{
+  /* Throw away prefixes and the first opcode byte. */
+ codep = insn_codep + 1;
+ oappend ("(bad)");
+}
+
+static void
+SEG_Fixup (int extrachar, int sizeflag)
+{
+ if (mod == 3)
+ {
+ /* We need to add a proper suffix with
+
+ movw %ds,%ax
+ movl %ds,%eax
+ movq %ds,%rax
+ movw %ax,%ds
+ movl %eax,%ds
+ movq %rax,%ds
+ */
+ const char *suffix;
+
+ if (prefixes & PREFIX_DATA)
+ suffix = "w";
+ else
+ {
+ USED_REX (REX_MODE64);
+ if (rex & REX_MODE64)
+ suffix = "q";
+ else
+ suffix = "l";
+ }
+ strcat (obuf, suffix);
+ }
+ else
+ {
+ /* We need to fix the suffix for
+
+ movw %ds,(%eax)
+ movw %ds,(%rax)
+ movw (%eax),%ds
+ movw (%rax),%ds
+
+ Override "mov[l|q]". */
+ char *p = obuf + strlen (obuf) - 1;
+
+ /* We might not have a suffix. */
+ if (*p == 'v')
+ ++p;
+ *p = 'w';
+ }
+
+ OP_E (extrachar, sizeflag);
+}
+
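+/* Rewrite the register forms of 0f 01 /0 as the VMX instructions
+   vmcall/vmlaunch/vmresume/vmxoff, which share an encoding with sgdt. */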
+static void
+VMX_Fixup (int extrachar ATTRIBUTE_UNUSED, int sizeflag)
+{
+  if (mod == 3 && reg == 0 && rm >= 1 && rm <= 4)
+ {
+ /* Override "sgdt". */
+ char *p = obuf + strlen (obuf) - 4;
+
+ /* We might have a suffix when disassembling with -Msuffix. */
+ if (*p == 'g')
+ --p;
+
+ switch (rm)
+ {
+ case 1:
+ strcpy (p, "vmcall");
+ break;
+ case 2:
+ strcpy (p, "vmlaunch");
+ break;
+ case 3:
+ strcpy (p, "vmresume");
+ break;
+ case 4:
+ strcpy (p, "vmxoff");
+ break;
+ }
+
+ codep++;
+ }
+ else
+ OP_E (0, sizeflag);
+}
+
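+/* 0f c7 /6 names three VMX instructions: vmclear with a data-size
+   prefix, vmxon with a repz prefix, and vmptrld with neither. */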
+static void
+OP_VMX (int bytemode, int sizeflag)
+{
+ used_prefixes |= (prefixes & (PREFIX_DATA | PREFIX_REPZ));
+ if (prefixes & PREFIX_DATA)
+ strcpy (obuf, "vmclear");
+ else if (prefixes & PREFIX_REPZ)
+ strcpy (obuf, "vmxon");
+ else
+ strcpy (obuf, "vmptrld");
+ OP_E (bytemode, sizeflag);
+}
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -42,6 +42,9 @@
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>
+#ifdef CONFIG_KDB
+#include <linux/kdb.h>
+#endif
#include <asm/idle.h>
#include <asm/io.h>
@@ -1193,6 +1196,11 @@ next:
if (test_bit(vector, used_vectors))
goto next;
+#ifdef CONFIG_KDB
+ if (vector == KDBENTER_VECTOR)
+ goto next;
+#endif
+
for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
if (per_cpu(vector_irq, new_cpu)[vector] != -1)
goto next;
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -15,6 +15,9 @@
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>
+#ifdef CONFIG_KDB
+#include <linux/kdb.h>
+#endif
#include <asm/stacktrace.h>
@@ -260,6 +263,9 @@ void __kprobes oops_end(unsigned long fl
/* Nest count reaches zero, release the lock. */
arch_spin_unlock(&die_lock);
raw_local_irq_restore(flags);
+#ifdef CONFIG_KDB
+ kdb(KDB_REASON_OOPS, signr, regs);
+#endif
oops_exit();
if (!signr)
@@ -328,6 +334,9 @@ void die(const char *str, struct pt_regs
if (__die(str, regs, err))
sig = 0;
+#ifdef CONFIG_KDB
+ kdb_diemsg = str;
+#endif
oops_end(flags, regs, sig);
}
@@ -348,6 +357,9 @@ die_nmi(char *str, struct pt_regs *regs,
printk(" on CPU%d, ip %08lx, registers:\n",
smp_processor_id(), regs->ip);
show_registers(regs);
+#ifdef CONFIG_KDB
+ kdb(KDB_REASON_NMI, 0, regs);
+#endif
oops_end(flags, regs, 0);
if (do_panic || panic_on_oops)
panic("Non maskable interrupt");
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1008,6 +1008,26 @@ ENTRY(alignment_check)
CFI_ENDPROC
END(alignment_check)
+#ifdef CONFIG_KDB
+
+ENTRY(kdb_call)
+ RING0_INT_FRAME
+ pushl %eax # save orig EAX
+ CFI_ADJUST_CFA_OFFSET 4
+ SAVE_ALL
+ movl %esp,%ecx # struct pt_regs
+ movl $0,%edx # error_code
+ movl $1,%eax # KDB_REASON_ENTER
+ call kdb
+ jmp restore_all
+ CFI_ENDPROC
+
+#ifdef CONFIG_SMP
+BUILD_INTERRUPT(kdb_interrupt,KDB_VECTOR)
+#endif /* CONFIG_SMP */
+
+#endif /* CONFIG_KDB */
+
ENTRY(divide_error)
RING0_INT_FRAME
pushl $0 # no error code
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1331,6 +1331,33 @@ END(xen_failsafe_callback)
#endif /* CONFIG_XEN */
+#ifdef CONFIG_KDB
+
+#ifdef CONFIG_SMP
+apicinterrupt KDB_VECTOR \
+ kdb_interrupt, smp_kdb_interrupt
+#endif /* CONFIG_SMP */
+
+ENTRY(kdb_call)
+ INTR_FRAME
+ cld
+ pushq $-1 # orig_eax
+ CFI_ADJUST_CFA_OFFSET 8
+ SAVE_ALL
+ movq $1,%rdi # KDB_REASON_ENTER
+ movq $0,%rsi # error_code
+ movq %rsp,%rdx # struct pt_regs
+ call kdb
+ RESTORE_ALL
+ addq $8,%rsp # forget orig_eax
+ CFI_ADJUST_CFA_OFFSET -8
+ iretq
+ CFI_ENDPROC
+END(kdb_call)
+
+#endif /* CONFIG_KDB */
+
+
/*
* Some functions should be protected against kprobes
*/
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -3,6 +3,10 @@
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/efi.h>
+#ifdef CONFIG_KDB
+#include <linux/kdb.h>
+#endif /* CONFIG_KDB */
+#include <linux/kexec.h>
#include <linux/dmi.h>
#include <linux/sched.h>
#include <linux/tboot.h>
@@ -630,6 +634,14 @@ void native_machine_shutdown(void)
/* Make certain I only run on the appropriate processor */
set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
+#if defined(CONFIG_X86_32) && defined(CONFIG_KDB)
+ /*
+	 * If this restart is occurring while kdb is running (e.g. the reboot
+	 * command), the other CPUs are already stopped.  Don't try to
+	 * stop them again.
+ */
+ if (!KDB_IS_RUNNING())
+#endif /* defined(CONFIG_X86_32) && defined(CONFIG_KDB) */
/* O.K Now that I'm on the appropriate processor,
* stop all of the others.
*/
@@ -740,6 +752,29 @@ static nmi_shootdown_cb shootdown_callba
static atomic_t waiting_for_crash_ipi;
+#ifdef CONFIG_KDB_KDUMP
+void halt_current_cpu(struct pt_regs *regs)
+{
+#ifdef CONFIG_X86_32
+ struct pt_regs fixed_regs;
+#endif
+ local_irq_disable();
+#ifdef CONFIG_X86_32
+ if (!user_mode_vm(regs)) {
+ crash_fixup_ss_esp(&fixed_regs, regs);
+ regs = &fixed_regs;
+ }
+#endif
+ crash_save_cpu(regs, raw_smp_processor_id());
+ disable_local_APIC();
+ atomic_dec(&waiting_for_crash_ipi);
+ /* Assume hlt works */
+ halt();
+	for (;;)
+ cpu_relax();
+}
+#endif /* CONFIG_KDB_KDUMP */
+
static int crash_nmi_callback(struct notifier_block *self,
unsigned long val, void *data)
{
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -44,6 +44,10 @@
#include <linux/edac.h>
#endif
+#ifdef CONFIG_KDB
+#include <linux/kdb.h>
+#endif /* CONFIG_KDB */
+
#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
@@ -361,6 +365,10 @@ io_check_error(unsigned char reason, str
static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
+#ifdef CONFIG_KDB
+ (void)kdb(KDB_REASON_NMI, reason, regs);
+#endif /* CONFIG_KDB */
+
if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
NOTIFY_STOP)
return;
@@ -396,6 +404,16 @@ static notrace __kprobes void default_do
if (!cpu)
reason = get_nmi_reason();
+#if defined(CONFIG_SMP) && defined(CONFIG_KDB)
+ /*
+ * Call the kernel debugger to see if this NMI is due
+	 * to a KDB-requested IPI.  If so, kdb will handle it.
+ */
+ if (kdb_ipi(regs, NULL)) {
+ return;
+ }
+#endif /* defined(CONFIG_SMP) && defined(CONFIG_KDB) */
+
if (!(reason & 0xc0)) {
if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
== NOTIFY_STOP)
@@ -460,6 +478,10 @@ void restart_nmi(void)
/* May run on IST stack. */
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
+#ifdef CONFIG_KDB
+ if (kdb(KDB_REASON_BREAK, error_code, regs))
+ return;
+#endif
#ifdef CONFIG_KPROBES
if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
== NOTIFY_STOP)
@@ -552,6 +574,11 @@ dotraplinkage void __kprobes do_debug(st
/* Store the virtualized DR6 value */
tsk->thread.debugreg6 = dr6;
+#ifdef CONFIG_KDB
+ if (kdb(KDB_REASON_DEBUG, error_code, regs))
+ return;
+#endif /* CONFIG_KDB */
+
if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code,
SIGTRAP) == NOTIFY_STOP)
return;