author		Chris Zankel <czankel@tensilica.com>	2005-06-23 22:01:30 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-06-24 00:05:22 -0700
commit		e344b63eeec7850b5e900e10c8a6c61d083fd3a4 (patch)
tree		4871ace0c16423ce4697e4065841c3a0f55563c6
parent		9a8fd5589902153a134111ed7a40f9cca1f83254 (diff)
[PATCH] xtensa: Architecture support for Tensilica Xtensa Part 7
The attached patches provide part 7 of an architecture implementation for the Tensilica Xtensa CPU series.

Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	include/asm-xtensa/xtensa/cacheasm.h	708
-rw-r--r--	include/asm-xtensa/xtensa/cacheattrasm.h	432
-rw-r--r--	include/asm-xtensa/xtensa/config-linux_be/core.h	1270
-rw-r--r--	include/asm-xtensa/xtensa/config-linux_be/defs.h	270
-rw-r--r--	include/asm-xtensa/xtensa/config-linux_be/specreg.h	99
-rw-r--r--	include/asm-xtensa/xtensa/config-linux_be/system.h	198
-rw-r--r--	include/asm-xtensa/xtensa/config-linux_be/tie.h	275
-rw-r--r--	include/asm-xtensa/xtensa/coreasm.h	526
-rw-r--r--	include/asm-xtensa/xtensa/corebits.h	77
-rw-r--r--	include/asm-xtensa/xtensa/hal.h	822
-rw-r--r--	include/asm-xtensa/xtensa/simcall.h	130
-rw-r--r--	include/asm-xtensa/xtensa/xt2000-uart.h	155
-rw-r--r--	include/asm-xtensa/xtensa/xt2000.h	408
-rw-r--r--	include/asm-xtensa/xtensa/xtboard.h	120
14 files changed, 5490 insertions, 0 deletions
diff --git a/include/asm-xtensa/xtensa/cacheasm.h b/include/asm-xtensa/xtensa/cacheasm.h
new file mode 100644
index 00000000000..0cdbb0bf180
--- /dev/null
+++ b/include/asm-xtensa/xtensa/cacheasm.h
@@ -0,0 +1,708 @@
+#ifndef XTENSA_CACHEASM_H
+#define XTENSA_CACHEASM_H
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * include/asm-xtensa/xtensa/cacheasm.h -- assembler-specific cache
+ * related definitions that depend on CORE configuration.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+#include <xtensa/coreasm.h>
+
+
+/*
+ * This header file defines assembler macros of the form:
+ * <x>cache_<func>
+ * where <x> is 'i' or 'd' for instruction and data caches,
+ * and <func> indicates the function of the macro.
+ *
+ * The following functions <func> are defined,
+ * and apply only to the specified cache (I or D):
+ *
+ * reset
+ * Resets the cache.
+ *
+ * sync
+ * Makes sure any previous cache instructions have been completed;
+ * ie. makes sure any previous cache control operations
+ * have had full effect and been synchronized to memory.
+ * Eg. any invalidate completed [so as not to generate a hit],
+ * any writebacks or other pipelined writes written to memory, etc.
+ *
+ * invalidate_line (single cache line)
+ * invalidate_region (specified memory range)
+ * invalidate_all (entire cache)
+ * Invalidates all cache entries that cache
+ * data from the specified memory range.
+ * NOTE: locked entries are not invalidated.
+ *
+ * writeback_line (single cache line)
+ * writeback_region (specified memory range)
+ * writeback_all (entire cache)
+ * Writes back to memory all dirty cache entries
+ * that cache data from the specified memory range,
+ * and marks these entries as clean.
+ * NOTE: on some future implementations, this might
+ * also invalidate.
+ * NOTE: locked entries are written back, but never invalidated.
+ * NOTE: instruction caches never implement writeback.
+ *
+ * writeback_inv_line (single cache line)
+ * writeback_inv_region (specified memory range)
+ * writeback_inv_all (entire cache)
+ * Writes back to memory all dirty cache entries
+ * that cache data from the specified memory range,
+ * and invalidates these entries (including all clean
+ * cache entries that cache data from that range).
+ * NOTE: locked entries are written back but not invalidated.
+ * NOTE: instruction caches never implement writeback.
+ *
+ * lock_line (single cache line)
+ * lock_region (specified memory range)
+ * Prefetch and lock the specified memory range into cache.
+ * NOTE: if any part of the specified memory range cannot
+ * be locked, a ??? exception occurs. These macros don't
+ * do anything special (yet anyway) to handle this situation.
+ *
+ * unlock_line (single cache line)
+ * unlock_region (specified memory range)
+ * unlock_all (entire cache)
+ * Unlock cache entries that cache the specified memory range.
+ * Entries not already locked are unaffected.
+ */
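+
+/*
+ * For example (a hypothetical call site, with a2/a3 available as scratch
+ * registers), flushing and invalidating all cached state looks like:
+ *
+ * dcache_writeback_inv_all a2, a3
+ * icache_invalidate_all a2, a3
+ */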
+
+
+
+/*************************** GENERIC -- ALL CACHES ***************************/
+
+
+/*
+ * The following macros assume the following cache size/parameter limits
+ * in the current Xtensa core implementation:
+ * cache size: 1024 bytes minimum
+ * line size: 16 - 64 bytes
+ * way count: 1 - 4
+ *
+ * Minimum entries per way (ie. per associativity) = 1024 / 64 / 4 = 4
+ * Hence the assumption that each loop can execute four cache instructions.
+ *
+ * Correspondingly, the offset range of instructions is assumed able to cover
+ * four lines, ie. offsets {0,1,2,3} * line_size are assumed valid for
+ * both hit and indexed cache instructions. Ie. these offsets are all
+ * valid: 0, 16, 32, 48, 64, 96, 128, 192 (for line sizes 16, 32, 64).
+ * This is true of all original cache instructions
+ * (dhi, ihi, dhwb, dhwbi, dii, iii) which have offsets
+ * of 0 to 1020 in multiples of 4 (ie. 8 bits shifted by 2).
+ * This is also true of subsequent cache instructions
+ * (dhu, ihu, diu, iiu, diwb, diwbi, dpfl, ipfl) which have offsets
+ * of 0 to 240 in multiples of 16 (ie. 4 bits shifted by 4).
+ *
+ * (Maximum cache size, currently 32k, doesn't affect the following macros.
+ * A cache way size greater than the MMU minimum page size causes aliasing, but that's another matter.)
+ */
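+
+/*
+ * Worked example (hypothetical configuration): an 8KB, 2-way cache with
+ * 32-byte lines has 8192/32/2 = 128 lines per way, so cache_index_all
+ * below loops 8192/(32*2*4) = 32 times, issuing four indexed cache
+ * instructions per iteration.
+ */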
+
+
+
+/*
+ * Macro to apply an 'indexed' cache instruction to the entire cache.
+ *
+ * Parameters:
+ * cainst instruction/macro that takes an address register parameter
+ * and an offset parameter (in range 0 .. 3*linesize).
+ * size size of cache in bytes
+ * linesize size of cache line in bytes
+ * assoc_or1 number of associativities (ways/sets) in cache
+ * if all sets affected by cainst,
+ * or 1 if only one set (or not all sets) of the cache
+ * is affected by cainst (eg. DIWB or DIWBI [not yet ISA defined]).
+ * aa, ab unique address registers (temporaries)
+ */
+
+ .macro cache_index_all cainst, size, linesize, assoc_or1, aa, ab
+
+ // Sanity-check on cache parameters:
+ .ifne (\size % (\linesize * \assoc_or1 * 4))
+ .err // cache configuration outside expected/supported range!
+ .endif
+
+ // \size byte cache, \linesize byte lines, \assoc_or1 way(s) affected by each \cainst.
+ movi \aa, (\size / (\linesize * \assoc_or1 * 4))
+ // Possible improvement: need only loop if \aa > 1 ;
+ // however that particular condition is highly unlikely.
+ movi \ab, 0 // to iterate over cache
+ floop \aa, cachex\@
+ \cainst \ab, 0*\linesize
+ \cainst \ab, 1*\linesize
+ \cainst \ab, 2*\linesize
+ \cainst \ab, 3*\linesize
+ addi \ab, \ab, 4*\linesize // move to next line
+ floopend \aa, cachex\@
+
+ .endm
+
+
+/*
+ * Macro to apply a 'hit' cache instruction to a memory region,
+ * ie. to any cache entries that cache a specified portion (region) of memory.
+ * Takes care of the unaligned cases, ie. may apply to one
+ * more cache line than \asize / linesize if \addr is not aligned.
+ *
+ *
+ * Parameters are:
+ * cainst instruction/macro that takes an address register parameter
+ * and an offset parameter (currently always zero)
+ * and generates a cache instruction (eg. "dhi", "dhwb", "ihi", etc.)
+ * linesize_log2 log2(size of cache line in bytes)
+ * addr register containing start address of region (clobbered)
+ * asize register containing size of the region in bytes (clobbered)
+ * askew unique register used as temporary
+ *
+ * !?!?! 2DO: optimization: iterate max(cache_size and \asize) / linesize
+ */
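+
+/*
+ * Worked example (hypothetical values): with \linesize_log2 = 4 (16-byte
+ * lines), \addr = 0x1238 and \asize = 0x20, the macro computes \askew = 8
+ * and \asize = (0x20 + 8 + 15) >> 4 = 3, so \cainst is applied to the
+ * three lines at 0x1230, 0x1240 and 0x1250, covering the unaligned start
+ * and end of the region.
+ */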
+
+ .macro cache_hit_region cainst, linesize_log2, addr, asize, askew
+
+ // Make \asize the number of iterations:
+ extui \askew, \addr, 0, \linesize_log2 // get unalignment amount of \addr
+ add \asize, \asize, \askew // ... and add it to \asize
+ addi \asize, \asize, (1 << \linesize_log2) - 1 // round up!
+ srli \asize, \asize, \linesize_log2
+
+ // Iterate over region:
+ floopnez \asize, cacheh\@
+ \cainst \addr, 0
+ addi \addr, \addr, (1 << \linesize_log2) // move to next line
+ floopend \asize, cacheh\@
+
+ .endm
+
+
+
+
+
+/*************************** INSTRUCTION CACHE ***************************/
+
+
+/*
+ * Reset/initialize the instruction cache by simply invalidating it:
+ * (need to unlock first also, if cache locking implemented):
+ *
+ * Parameters:
+ * aa, ab unique address registers (temporaries)
+ */
+ .macro icache_reset aa, ab
+ icache_unlock_all \aa, \ab
+ icache_invalidate_all \aa, \ab
+ .endm
+
+
+/*
+ * Synchronize after an instruction cache operation,
+ * to be sure everything is in sync with memory as to be
+ * expected following any previous instruction cache control operations.
+ *
+ * Parameters are:
+ * ar an address register (temporary) (currently unused, but may be used in future)
+ */
+ .macro icache_sync ar
+#if XCHAL_ICACHE_SIZE > 0
+ isync
+#endif
+ .endm
+
+
+
+/*
+ * Invalidate a single line of the instruction cache.
+ * Parameters are:
+ * ar address register that contains (virtual) address to invalidate
+ * (may get clobbered in a future implementation, but not currently)
+ * offset (optional) offset to add to \ar to compute effective address to invalidate
+ * (note: some number of lsbits are ignored)
+ */
+ .macro icache_invalidate_line ar, offset
+#if XCHAL_ICACHE_SIZE > 0
+ ihi \ar, \offset // invalidate icache line
+ /*
+ * NOTE: in some version of the silicon [!!!SHOULD HAVE BEEN DOCUMENTED!!!]
+ * 'ihi' doesn't work, so it had been replaced with 'iii'
+ * (which would just invalidate more than it should,
+ * which should be okay other than the performance hit
+ * because cache locking did not exist in that version,
+ * unless user somehow relies on something being cached).
+ * [WHAT VERSION IS IT!!?!?
+ * IS THERE ANY WAY TO TEST FOR THAT HERE, TO OUTPUT 'III' ONLY IF NEEDED!?!?].
+ *
+ * iii \ar, \offset
+ */
+ icache_sync \ar
+#endif
+ .endm
+
+
+
+
+/*
+ * Invalidate instruction cache entries that cache a specified portion of memory.
+ * Parameters are:
+ * astart start address (register gets clobbered)
+ * asize size of the region in bytes (register gets clobbered)
+ * ac unique register used as temporary
+ */
+ .macro icache_invalidate_region astart, asize, ac
+#if XCHAL_ICACHE_SIZE > 0
+ // Instruction cache region invalidation:
+ cache_hit_region ihi, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
+ icache_sync \ac
+ // End of instruction cache region invalidation
+#endif
+ .endm
+
+
+
+/*
+ * Invalidate entire instruction cache.
+ *
+ * Parameters:
+ * aa, ab unique address registers (temporaries)
+ */
+ .macro icache_invalidate_all aa, ab
+#if XCHAL_ICACHE_SIZE > 0
+ // Instruction cache invalidation:
+ cache_index_all iii, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, XCHAL_ICACHE_WAYS, \aa, \ab
+ icache_sync \aa
+ // End of instruction cache invalidation
+#endif
+ .endm
+
+
+
+/*
+ * Lock (prefetch & lock) a single line of the instruction cache.
+ *
+ * Parameters are:
+ * ar address register that contains (virtual) address to lock
+ * (may get clobbered in a future implementation, but not currently)
+ * offset offset to add to \ar to compute effective address to lock
+ * (note: some number of lsbits are ignored)
+ */
+ .macro icache_lock_line ar, offset
+#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
+ ipfl \ar, \offset /* prefetch and lock icache line */
+ icache_sync \ar
+#endif
+ .endm
+
+
+
+/*
+ * Lock (prefetch & lock) a specified portion of memory into the instruction cache.
+ * Parameters are:
+ * astart start address (register gets clobbered)
+ * asize size of the region in bytes (register gets clobbered)
+ * ac unique register used as temporary
+ */
+ .macro icache_lock_region astart, asize, ac
+#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
+ // Instruction cache region lock:
+ cache_hit_region ipfl, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
+ icache_sync \ac
+ // End of instruction cache region lock
+#endif
+ .endm
+
+
+
+/*
+ * Unlock a single line of the instruction cache.
+ *
+ * Parameters are:
+ * ar address register that contains (virtual) address to unlock
+ * (may get clobbered in a future implementation, but not currently)
+ * offset offset to add to \ar to compute effective address to unlock
+ * (note: some number of lsbits are ignored)
+ */
+ .macro icache_unlock_line ar, offset
+#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
+ ihu \ar, \offset /* unlock icache line */
+ icache_sync \ar
+#endif
+ .endm
+
+
+
+/*
+ * Unlock a specified portion of memory from the instruction cache.
+ * Parameters are:
+ * astart start address (register gets clobbered)
+ * asize size of the region in bytes (register gets clobbered)
+ * ac unique register used as temporary
+ */
+ .macro icache_unlock_region astart, asize, ac
+#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
+ // Instruction cache region unlock:
+ cache_hit_region ihu, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
+ icache_sync \ac
+ // End of instruction cache region unlock
+#endif
+ .endm
+
+
+
+/*
+ * Unlock entire instruction cache.
+ *
+ * Parameters:
+ * aa, ab unique address registers (temporaries)
+ */
+ .macro icache_unlock_all aa, ab
+#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
+ // Instruction cache unlock:
+ cache_index_all iiu, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, 1, \aa, \ab
+ icache_sync \aa
+ // End of instruction cache unlock
+#endif
+ .endm
+
+
+
+
+
+/*************************** DATA CACHE ***************************/
+
+
+
+/*
+ * Reset/initialize the data cache by simply invalidating it
+ * (need to unlock first also, if cache locking implemented):
+ *
+ * Parameters:
+ * aa, ab unique address registers (temporaries)
+ */
+ .macro dcache_reset aa, ab
+ dcache_unlock_all \aa, \ab
+ dcache_invalidate_all \aa, \ab
+ .endm
+
+
+
+
+/*
+ * Synchronize after a data cache operation,
+ * to be sure everything is in sync with memory as to be
+ * expected following any previous data cache control operations.
+ *
+ * Parameters are:
+ * ar an address register (temporary) (currently unused, but may be used in future)
+ */
+ .macro dcache_sync ar
+#if XCHAL_DCACHE_SIZE > 0
+ // The commented-out sequence below errs on the conservative side (too much so); a DSYNC should be sufficient:
+ //memw // synchronize data cache changes relative to subsequent memory accesses
+ //isync // be conservative and ISYNC as well (just to be sure)
+
+ dsync
+#endif
+ .endm
+
+
+
+/*
+ * Synchronize after a data store operation,
+ * to be sure the stored data is completely off the processor
+ * (and assuming there is no buffering outside the processor,
+ * that the data is in memory). This may be required to
+ * ensure that the processor's write buffers are emptied.
+ * A MEMW followed by a read guarantees this, by definition.
+ * We also try to make sure the read itself completes.
+ *
+ * Parameters are:
+ * ar an address register (temporary)
+ */
+ .macro write_sync ar
+ memw // ensure previous memory accesses are complete prior to subsequent memory accesses
+ l32i \ar, sp, 0 // completing this read ensures any previous write has completed, because of MEMW
+ //slot
+ add \ar, \ar, \ar // use the result of the read to help ensure the read completes (in future architectures)
+ .endm
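+
+/*
+ * For example (a hypothetical call site): after storing a command word to
+ * a memory-mapped device register, a driver could invoke
+ * write_sync a3
+ * with a3 as a scratch register; note that the macro reads from (sp), so
+ * the stack pointer must reference readable memory at that point.
+ */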
+
+
+/*
+ * Invalidate a single line of the data cache.
+ * Parameters are:
+ * ar address register that contains (virtual) address to invalidate
+ * (may get clobbered in a future implementation, but not currently)
+ * offset (optional) offset to add to \ar to compute effective address to invalidate
+ * (note: some number of lsbits are ignored)
+ */
+ .macro dcache_invalidate_line ar, offset
+#if XCHAL_DCACHE_SIZE > 0
+ dhi \ar, \offset
+ dcache_sync \ar
+#endif
+ .endm
+
+
+
+
+
+/*
+ * Invalidate data cache entries that cache a specified portion of memory.
+ * Parameters are:
+ * astart start address (register gets clobbered)
+ * asize size of the region in bytes (register gets clobbered)
+ * ac unique register used as temporary
+ */
+ .macro dcache_invalidate_region astart, asize, ac
+#if XCHAL_DCACHE_SIZE > 0
+ // Data cache region invalidation:
+ cache_hit_region dhi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
+ dcache_sync \ac
+ // End of data cache region invalidation
+#endif
+ .endm
+
+
+
+#if 0
+/*
+ * This is a work-around for a bug in SiChip1 (???).
+ * There should be a proper mechanism for not outputting
+ * these instructions when not needed.
+ * To enable work-around, uncomment this and replace 'dii'
+ * with 'dii_s1' everywhere, eg. in dcache_invalidate_all
+ * macro below.
+ */
+ .macro dii_s1 ar, offset
+ dii \ar, \offset
+ or \ar, \ar, \ar
+ or \ar, \ar, \ar
+ or \ar, \ar, \ar
+ or \ar, \ar, \ar
+ .endm
+#endif
+
+
+/*
+ * Invalidate entire data cache.
+ *
+ * Parameters:
+ * aa, ab unique address registers (temporaries)
+ */
+ .macro dcache_invalidate_all aa, ab
+#if XCHAL_DCACHE_SIZE > 0
+ // Data cache invalidation:
+ cache_index_all dii, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, XCHAL_DCACHE_WAYS, \aa, \ab
+ dcache_sync \aa
+ // End of data cache invalidation
+#endif
+ .endm
+
+
+
+/*
+ * Writeback a single line of the data cache.
+ * Parameters are:
+ * ar address register that contains (virtual) address to writeback
+ * (may get clobbered in a future implementation, but not currently)
+ * offset offset to add to \ar to compute effective address to writeback
+ * (note: some number of lsbits are ignored)
+ */
+ .macro dcache_writeback_line ar, offset
+#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
+ dhwb \ar, \offset
+ dcache_sync \ar
+#endif
+ .endm
+
+
+
+/*
+ * Writeback dirty data cache entries that cache a specified portion of memory.
+ * Parameters are:
+ * astart start address (register gets clobbered)
+ * asize size of the region in bytes (register gets clobbered)
+ * ac unique register used as temporary
+ */
+ .macro dcache_writeback_region astart, asize, ac
+#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
+ // Data cache region writeback:
+ cache_hit_region dhwb, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
+ dcache_sync \ac
+ // End of data cache region writeback
+#endif
+ .endm
+
+
+
+/*
+ * Writeback entire data cache.
+ * Parameters:
+ * aa, ab unique address registers (temporaries)
+ */
+ .macro dcache_writeback_all aa, ab
+#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
+ // Data cache writeback:
+ cache_index_all diwb, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab
+ dcache_sync \aa
+ // End of data cache writeback
+#endif
+ .endm
+
+
+
+/*
+ * Writeback and invalidate a single line of the data cache.
+ * Parameters are:
+ * ar address register that contains (virtual) address to writeback and invalidate
+ * (may get clobbered in a future implementation, but not currently)
+ * offset offset to add to \ar to compute effective address to writeback and invalidate
+ * (note: some number of lsbits are ignored)
+ */
+ .macro dcache_writeback_inv_line ar, offset
+#if XCHAL_DCACHE_SIZE > 0
+ dhwbi \ar, \offset /* writeback and invalidate dcache line */
+ dcache_sync \ar
+#endif
+ .endm
+
+
+
+/*
+ * Writeback and invalidate data cache entries that cache a specified portion of memory.
+ * Parameters are:
+ * astart start address (register gets clobbered)
+ * asize size of the region in bytes (register gets clobbered)
+ * ac unique register used as temporary
+ */
+ .macro dcache_writeback_inv_region astart, asize, ac
+#if XCHAL_DCACHE_SIZE > 0
+ // Data cache region writeback and invalidate:
+ cache_hit_region dhwbi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
+ dcache_sync \ac
+ // End of data cache region writeback and invalidate
+#endif
+ .endm
+
+
+
+/*
+ * Writeback and invalidate entire data cache.
+ * Parameters:
+ * aa, ab unique address registers (temporaries)
+ */
+ .macro dcache_writeback_inv_all aa, ab
+#if XCHAL_DCACHE_SIZE > 0
+ // Data cache writeback and invalidate:
+#if XCHAL_DCACHE_IS_WRITEBACK
+ cache_index_all diwbi, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab
+ dcache_sync \aa
+#else /*writeback*/
+ // Data cache does not support writeback, so just invalidate:
+ dcache_invalidate_all \aa, \ab
+#endif /*writeback*/
+ // End of data cache writeback and invalidate
+#endif
+ .endm
+
+
+
+
+/*
+ * Lock (prefetch & lock) a single line of the data cache.
+ *
+ * Parameters are:
+ * ar address register that contains (virtual) address to lock
+ * (may get clobbered in a future implementation, but not currently)
+ * offset offset to add to \ar to compute effective address to lock
+ * (note: some number of lsbits are ignored)
+ */
+ .macro dcache_lock_line ar, offset
+#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
+ dpfl \ar, \offset /* prefetch and lock dcache line */
+ dcache_sync \ar
+#endif
+ .endm
+
+
+
+/*
+ * Lock (prefetch & lock) a specified portion of memory into the data cache.
+ * Parameters are:
+ * astart start address (register gets clobbered)
+ * asize size of the region in bytes (register gets clobbered)
+ * ac unique register used as temporary
+ */
+ .macro dcache_lock_region astart, asize, ac
+#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
+ // Data cache region lock:
+ cache_hit_region dpfl, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
+ dcache_sync \ac
+ // End of data cache region lock
+#endif
+ .endm
+
+
+
+/*
+ * Unlock a single line of the data cache.
+ *
+ * Parameters are:
+ * ar address register that contains (virtual) address to unlock
+ * (may get clobbered in a future implementation, but not currently)
+ * offset offset to add to \ar to compute effective address to unlock
+ * (note: some number of lsbits are ignored)
+ */
+ .macro dcache_unlock_line ar, offset
+#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
+ dhu \ar, \offset /* unlock dcache line */
+ dcache_sync \ar
+#endif
+ .endm
+
+
+
+/*
+ * Unlock a specified portion of memory from the data cache.
+ * Parameters are:
+ * astart start address (register gets clobbered)
+ * asize size of the region in bytes (register gets clobbered)
+ * ac unique register used as temporary
+ */
+ .macro dcache_unlock_region astart, asize, ac
+#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
+ // Data cache region unlock:
+ cache_hit_region dhu, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
+ dcache_sync \ac
+ // End of data cache region unlock
+#endif
+ .endm
+
+
+
+/*
+ * Unlock entire data cache.
+ *
+ * Parameters:
+ * aa, ab unique address registers (temporaries)
+ */
+ .macro dcache_unlock_all aa, ab
+#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
+ // Data cache unlock:
+ cache_index_all diu, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab
+ dcache_sync \aa
+ // End of data cache unlock
+#endif
+ .endm
+
+
+#endif /*XTENSA_CACHEASM_H*/
+
diff --git a/include/asm-xtensa/xtensa/cacheattrasm.h b/include/asm-xtensa/xtensa/cacheattrasm.h
new file mode 100644
index 00000000000..1c3e117b359
--- /dev/null
+++ b/include/asm-xtensa/xtensa/cacheattrasm.h
@@ -0,0 +1,432 @@
+#ifndef XTENSA_CACHEATTRASM_H
+#define XTENSA_CACHEATTRASM_H
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * include/asm-xtensa/xtensa/cacheattrasm.h -- assembler-specific
+ * CACHEATTR register related definitions that depend on CORE
+ * configuration.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+#include <xtensa/coreasm.h>
+
+
+/*
+ * This header file defines assembler macros of the form:
+ * <x>cacheattr_<func>
+ * where:
+ * <x> is 'i', 'd' or absent for instruction, data
+ * or both caches; and
+ * <func> indicates the function of the macro.
+ *
+ * The following functions are defined:
+ *
+ * icacheattr_get
+ * Reads I-cache CACHEATTR into a2 (clobbers a3-a5).
+ *
+ * dcacheattr_get
+ * Reads D-cache CACHEATTR into a2 (clobbers a3-a5).
+ * (Note: for configs with a real CACHEATTR register, the
+ * above two macros are identical.)
+ *
+ * cacheattr_set
+ * Writes both I-cache and D-cache CACHEATTRs from a2 (a3-a8 clobbered).
+ * Works even when changing one's own code's attributes.
+ *
+ * icacheattr_is_enabled label
+ * Branches to \label if I-cache appears to have been enabled
+ * (eg. if CACHEATTR contains a cache-enabled attribute).
+ * (clobbers a2-a5,SAR)
+ *
+ * dcacheattr_is_enabled label
+ * Branches to \label if D-cache appears to have been enabled
+ * (eg. if CACHEATTR contains a cache-enabled attribute).
+ * (clobbers a2-a5,SAR)
+ *
+ * cacheattr_is_enabled label
+ * Branches to \label if either I-cache or D-cache appears to have been enabled
+ * (eg. if CACHEATTR contains a cache-enabled attribute).
+ * (clobbers a2-a5,SAR)
+ *
+ * The following macros are only defined under certain conditions:
+ *
+ * icacheattr_set (if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR)
+ * Writes I-cache CACHEATTR from a2 (a3-a8 clobbered).
+ *
+ * dcacheattr_set (if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR)
+ * Writes D-cache CACHEATTR from a2 (a3-a8 clobbered).
+ */
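+
+/*
+ * For example (a hypothetical call site), reset code could restore the
+ * power-up state, with every 512MB region bypassed, as follows:
+ *
+ * movi a2, XCHAL_CACHEATTR_ALL_BYPASS // default value, defined below
+ * cacheattr_set // applies to both I and D; clobbers a3-a8
+ */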
+
+
+
+/*************************** GENERIC -- ALL CACHES ***************************/
+
+/*
+ * _cacheattr_get
+ *
+ * (Internal macro.)
+ * Returns value of CACHEATTR register (or closest equivalent) in a2.
+ *
+ * Entry:
+ * (none)
+ * Exit:
+ * a2 value read from CACHEATTR
+ * a3-a5 clobbered (temporaries)
+ */
+ .macro _cacheattr_get tlb
+#if XCHAL_HAVE_CACHEATTR
+ rsr a2, CACHEATTR
+#elif XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
+ // We have a config that "mimics" CACHEATTR using a simplified
+ // "MMU" composed of a single statically-mapped way.
+ // DTLB and ITLB are independent, so there's no single
+ // cache attribute that can describe both. So for now
+ // just return the DTLB state.
+ movi a5, 0xE0000000
+ movi a2, 0
+ movi a3, 0
+1: add a3, a3, a5 // next segment
+ r&tlb&1 a4, a3 // get PPN+CA of segment at 0xE0000000, 0xC0000000, ..., 0
+ dsync // interlock???
+ slli a2, a2, 4
+ extui a4, a4, 0, 4 // extract CA
+ or a2, a2, a4
+ bnez a3, 1b
+#else
+ // This macro isn't applicable to arbitrary MMU configurations.
+ // Just return zero.
+ movi a2, 0
+#endif
+ .endm
+
+ .macro icacheattr_get
+ _cacheattr_get itlb
+ .endm
+
+ .macro dcacheattr_get
+ _cacheattr_get dtlb
+ .endm
+
+
+#define XCHAL_CACHEATTR_ALL_BYPASS 0x22222222 /* default (powerup/reset) value of CACHEATTR, all BYPASS
+ mode (ie. disabled/bypassed caches) */
+
+#if XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
+
+#define XCHAL_FCA_ENAMASK 0x001A /* bitmap of fetch attributes that require enabled icache */
+#define XCHAL_LCA_ENAMASK 0x0003 /* bitmap of load attributes that require enabled dcache */
+#define XCHAL_SCA_ENAMASK 0x0003 /* bitmap of store attributes that require enabled dcache */
+#define XCHAL_LSCA_ENAMASK (XCHAL_LCA_ENAMASK|XCHAL_SCA_ENAMASK) /* l/s attrs requiring enabled dcache */
+#define XCHAL_ALLCA_ENAMASK (XCHAL_FCA_ENAMASK|XCHAL_LSCA_ENAMASK) /* all attrs requiring enabled caches */
+
+/*
+ * _cacheattr_is_enabled
+ *
+ * (Internal macro.)
+ * Branches to \label if CACHEATTR in a2 indicates an enabled
+ * cache, using mask in a3.
+ *
+ * Parameters:
+ * label where to branch to if cache is enabled
+ * Entry:
+ * a2 contains CACHEATTR value used to determine whether
+ * caches are enabled
+ * a3 16-bit constant where each bit corresponds to
+ * one of the 16 possible CA values (in a CACHEATTR mask);
+ * CA values that indicate the cache is enabled
+ * have their corresponding bit set in this mask
+ * (eg. use XCHAL_xCA_ENAMASK, above)
+ * Exit:
+ * a2,a4,a5 clobbered
+ * SAR clobbered
+ */
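+
+/*
+ * For example (hypothetical input): with a3 = XCHAL_FCA_ENAMASK (0x001A)
+ * and a CA nibble value of 3 somewhere in a2, bit 3 of 0x001A is set, so
+ * the macro below branches to \label (the cache is considered enabled).
+ */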
+ .macro _cacheattr_is_enabled label
+ movi a4, 8 // loop 8 times
+.Lcaife\@:
+ extui a5, a2, 0, 4 // get CA nibble
+ ssr a5 // index into mask according to CA...
+ srl a5, a3 // ...and get CA's mask bit in a5 bit 0
+ bbsi.l a5, 0, \label // if CA indicates cache enabled, jump to label
+ srli a2, a2, 4 // next nibble
+ addi a4, a4, -1
+ bnez a4, .Lcaife\@ // loop for each nibble
+ .endm
+
+#else /* XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR */
+ .macro _cacheattr_is_enabled label
+ j \label // macro not applicable, assume caches always enabled
+ .endm
+#endif /* XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR */
+
+
+
+/*
+ * icacheattr_is_enabled
+ *
+ * Branches to \label if I-cache is enabled.
+ *
+ * Parameters:
+ * label where to branch to if icache is enabled
+ * Entry:
+ * (none)
+ * Exit:
+ * a2-a5, SAR clobbered (temporaries)
+ */
+ .macro icacheattr_is_enabled label
+#if XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
+ icacheattr_get
+ movi a3, XCHAL_FCA_ENAMASK
+#endif
+ _cacheattr_is_enabled \label
+ .endm
+
+/*
+ * dcacheattr_is_enabled
+ *
+ * Branches to \label if D-cache is enabled.
+ *
+ * Parameters:
+ * label where to branch to if dcache is enabled
+ * Entry:
+ * (none)
+ * Exit:
+ * a2-a5, SAR clobbered (temporaries)
+ */
+ .macro dcacheattr_is_enabled label
+#if XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
+ dcacheattr_get
+ movi a3, XCHAL_LSCA_ENAMASK
+#endif
+ _cacheattr_is_enabled \label
+ .endm
+
+/*
+ * cacheattr_is_enabled
+ *
+ * Branches to \label if either I-cache or D-cache is enabled.
+ *
+ * Parameters:
+ * label where to branch to if a cache is enabled
+ * Entry:
+ * (none)
+ * Exit:
+ * a2-a5, SAR clobbered (temporaries)
+ */
+ .macro cacheattr_is_enabled label
+#if XCHAL_HAVE_CACHEATTR
+ rsr a2, CACHEATTR
+ movi a3, XCHAL_ALLCA_ENAMASK
+#elif XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
+ icacheattr_get
+ movi a3, XCHAL_FCA_ENAMASK
+ _cacheattr_is_enabled \label
+ dcacheattr_get
+ movi a3, XCHAL_LSCA_ENAMASK
+#endif
+ _cacheattr_is_enabled \label
+ .endm
+
+
+
+/*
+ * The ISA does not have a defined way to change the
+ * instruction cache attributes of the running code,
+ * ie. of the memory area that encloses the current PC.
+ * However, each micro-architecture (or class of
+ * configurations within a micro-architecture)
+ * provides a way to deal with this issue.
+ *
+ * Here are a few macros used to implement the relevant
+ * approach taken.
+ */
+
+#if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
+ // We have a config that "mimics" CACHEATTR using a simplified
+ // "MMU" composed of a single statically-mapped way.
+
+/*
+ * icacheattr_set
+ *
+ * Entry:
+ * a2 cacheattr value to set
+ * Exit:
+ * a2 unchanged
+ * a3-a8 clobbered (temporaries)
+ */
+ .macro icacheattr_set
+
+ movi a5, 0xE0000000 // mask of upper 3 bits
+ movi a6, 3f // PC where ITLB is set
+ movi a3, 0 // start at region 0 (0 .. 7)
+ and a6, a6, a5 // upper 3 bits of local PC area
+ mov a7, a2 // copy a2 so it doesn't get clobbered
+ j 3f
+
+# if XCHAL_HAVE_XLT_CACHEATTR
+ // Can do translations, use generic method:
+1: sub a6, a3, a5 // address of some other segment
+ ritlb1 a8, a6 // save its PPN+CA
+ dsync // interlock??
+ witlb a4, a6 // make it translate to this code area
+ movi a6, 5f // where to jump into it
+ isync
+ sub a6, a6, a5 // adjust jump address within that other segment
+ jx a6
+
+ // Note that in the following code snippet, which runs at a different virtual
+ // address than it is assembled for, we avoid using literals (eg. via movi/l32r)
+ // just in case literals end up in a different 512 MB segment, and we avoid
+ // instructions that rely on the current PC being what is expected.
+ //
+ .align 4
+ _j 6f // this is at label '5' minus 4 bytes
+ .align 4
+5: witlb a4, a3 // we're in other segment, now can write previous segment's CA
+ isync
+ add a6, a6, a5 // back to previous segment
+ addi a6, a6, -4 // next jump label
+ jx a6
+
+6: sub a6, a3, a5 // address of some other segment
+ witlb a8, a6 // restore PPN+CA of other segment
+ mov a6, a3 // restore a6
+ isync
+# else /* XCHAL_HAVE_XLT_CACHEATTR */
+ // Use micro-architecture specific method.
+ // The following 4-instruction sequence is aligned such that
+ // it all fits within a single I-cache line. Sixteen byte
+ // alignment is sufficient for this (using XCHAL_ICACHE_LINESIZE
+ // actually causes problems because that can be greater than
+ // the alignment of the reset vector, where this macro is often
+ // invoked, which would cause the linker to align the reset
+ // vector code away from the reset vector!!).
+ .align 16 /*XCHAL_ICACHE_LINESIZE*/
+1: _witlb a4, a3 // write wired PTE (CA, no PPN) of 512MB segment to ITLB
+ _isync
+ nop
+ nop
+# endif /* XCHAL_HAVE_XLT_CACHEATTR */
+ beq a3, a5, 4f // done?
+
+ // Note that in the WITLB loop, we don't do any load/stores
+ // (may not be an issue here, but it is important in the DTLB case).
+2: srli a7, a7, 4 // next CA
+ sub a3, a3, a5 // next segment (add 0x20000000)
+3:
+# if XCHAL_HAVE_XLT_CACHEATTR /* if have translation, preserve it */
+ ritlb1 a8, a3 // get current PPN+CA of segment
+ dsync // interlock???
+ extui a4, a7, 0, 4 // extract CA to set
+ srli a8, a8, 4 // clear CA but keep PPN ...
+ slli a8, a8, 4 // ...
+ add a4, a4, a8 // combine new CA with PPN to preserve
+# else
+ extui a4, a7, 0, 4 // extract CA
+# endif
+ beq a3, a6, 1b // current PC's region? if so, do it in a safe way
+ witlb a4, a3 // write wired PTE (CA [+PPN]) of 512MB segment to ITLB
+ bne a3, a5, 2b
+ isync // make sure all ifetch changes take effect
+4:
+ .endm // icacheattr_set
+
+
+/*
+ * dcacheattr_set
+ *
+ * Entry:
+ * a2 cacheattr value to set
+ * Exit:
+ * a2 unchanged
+ * a3-a8 clobbered (temporaries)
+ */
+
+ .macro dcacheattr_set
+
+ movi a5, 0xE0000000 // mask of upper 3 bits
+ movi a3, 0 // start at region 0 (0 .. 7)
+ mov a7, a2 // copy a2 so it doesn't get clobbered
+ j 3f
+ // Note that in the WDTLB loop, we don't do any load/stores
+ // (including implicit l32r via movi) because it isn't safe.
+2: srli a7, a7, 4 // next CA
+ sub a3, a3, a5 // next segment (add 0x20000000)
+3:
+# if XCHAL_HAVE_XLT_CACHEATTR /* if have translation, preserve it */
+ rdtlb1 a8, a3 // get current PPN+CA of segment
+ dsync // interlock???
+ extui a4, a7, 0, 4 // extract CA to set
+ srli a8, a8, 4 // clear CA but keep PPN ...
+ slli a8, a8, 4 // ...
+ add a4, a4, a8 // combine new CA with PPN to preserve
+# else
+ extui a4, a7, 0, 4 // extract CA to set
+# endif
+ wdtlb a4, a3 // write wired PTE (CA [+PPN]) of 512MB segment to DTLB
+ bne a3, a5, 2b
+ dsync // make sure all data path changes take effect
+ .endm // dcacheattr_set
+
+#endif /* XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR */
+
+
+
+/*
+ * cacheattr_set
+ *
+ * Macro that sets the current CACHEATTR safely
+ * (both i and d) according to the current contents of a2.
+ * It works even when changing the cache attributes of
+ * the currently running code.
+ *
+ * Entry:
+ * a2 cacheattr value to set
+ * Exit:
+ * a2 unchanged
+ * a3-a8 clobbered (temporaries)
+ */
+ .macro cacheattr_set
+
+#if XCHAL_HAVE_CACHEATTR
+# if XCHAL_ICACHE_LINESIZE < 4
+ // No i-cache, so can always safely write to CACHEATTR:
+ wsr a2, CACHEATTR
+# else
+ // The Athens micro-architecture, when using the old
+ // exception architecture option (ie. with the CACHEATTR register)
+ // allows changing the cache attributes of the running code
+ // using the following exact sequence aligned to be within
+ // an instruction cache line. (NOTE: using XCHAL_ICACHE_LINESIZE
+ // alignment actually causes problems because that can be greater
+ // than the alignment of the reset vector, where this macro is often
+ // invoked, which would cause the linker to align the reset
+ // vector code away from the reset vector!!).
+ j 1f
+ .align 16 /*XCHAL_ICACHE_LINESIZE*/ // align to within an I-cache line
+1: _wsr a2, CACHEATTR
+ _isync
+ nop
+ nop
+# endif
+#elif XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
+ // DTLB and ITLB are independent, but to keep semantics
+ // of this macro we simply write to both.
+ icacheattr_set
+ dcacheattr_set
+#else
+ // This macro isn't applicable to arbitrary MMU configurations.
+ // Do nothing in this case.
+#endif
+ .endm
+
+
+#endif /*XTENSA_CACHEATTRASM_H*/
+
diff --git a/include/asm-xtensa/xtensa/config-linux_be/core.h b/include/asm-xtensa/xtensa/config-linux_be/core.h
new file mode 100644
index 00000000000..d54fe5eb106
--- /dev/null
+++ b/include/asm-xtensa/xtensa/config-linux_be/core.h
@@ -0,0 +1,1270 @@
+/*
+ * xtensa/config/core.h -- HAL definitions that are dependent on CORE configuration
+ *
+ * This header file is sometimes referred to as the "compile-time HAL" or CHAL.
+ * It was generated for a specific Xtensa processor configuration.
+ *
+ * Source for configuration-independent binaries (which link in a
+ * configuration-specific HAL library) must NEVER include this file.
+ * It is perfectly normal, however, for the HAL source itself to include this file.
+ */
+
+/*
+ * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ */
+
+
+#ifndef XTENSA_CONFIG_CORE_H
+#define XTENSA_CONFIG_CORE_H
+
+#include <xtensa/hal.h>
+
+
+/*----------------------------------------------------------------------
+ GENERAL
+ ----------------------------------------------------------------------*/
+
+/*
+ * Separators for macros that expand into arrays.
+ * These can be predefined by files that #include this one,
+ * when different separators are required.
+ */
+/* Element separator for macros that expand into 1-dimensional arrays: */
+#ifndef XCHAL_SEP
+#define XCHAL_SEP ,
+#endif
+/* Array separator for macros that expand into 2-dimensional arrays: */
+#ifndef XCHAL_SEP2
+#define XCHAL_SEP2 },{
+#endif
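+
+/*
+ * For example, a configuration-specific HAL source can expand the
+ * 1-dimensional array macros below directly into C initializers
+ * (hypothetical table name):
+ *
+ * const unsigned int my_intlevel_masks[16] = { XCHAL_INTLEVEL_MASKS };
+ */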
+
+
+/*----------------------------------------------------------------------
+ ENDIANNESS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 1
+#define XCHAL_HAVE_LE 0
+#define XCHAL_MEMORY_ORDER XTHAL_BIGENDIAN
+
+
+/*----------------------------------------------------------------------
+ REGISTER WINDOWS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_WINDOWED 1 /* 1 if windowed registers option configured, 0 otherwise */
+#define XCHAL_NUM_AREGS 64 /* number of physical address regs */
+#define XCHAL_NUM_AREGS_LOG2 6 /* log2(XCHAL_NUM_AREGS) */
+
+
+/*----------------------------------------------------------------------
+ ADDRESS ALIGNMENT
+ ----------------------------------------------------------------------*/
+
+/* These apply to a selected set of core load and store instructions only (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* 1 if unaligned loads cause an exception, 0 otherwise */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* 1 if unaligned stores cause an exception, 0 otherwise */
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* 1 if interrupt option configured, 0 otherwise */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* 1 if high-priority interrupt option configured, 0 otherwise */
+#define XCHAL_HAVE_HIGHLEVEL_INTERRUPTS XCHAL_HAVE_HIGHPRI_INTERRUPTS
+#define XCHAL_HAVE_NMI 0 /* 1 if NMI option configured, 0 otherwise */
+#define XCHAL_NUM_INTERRUPTS 17 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* number of bits to hold an interrupt number: roundup(log2(number of interrupts)) */
+#define XCHAL_NUM_EXTINTERRUPTS 10 /* number of external interrupts */
+#define XCHAL_NUM_INTLEVELS 4 /* number of interrupt levels (not including level zero!) */
+#define XCHAL_NUM_LOWPRI_LEVELS 1 /* number of low-priority interrupt levels (always 1) */
+#define XCHAL_FIRST_HIGHPRI_LEVEL (XCHAL_NUM_LOWPRI_LEVELS+1) /* level of first high-priority interrupt (always 2) */
+#define XCHAL_EXCM_LEVEL 1 /* level of interrupts masked by PS.EXCM (XEA2 only; always 1 in T10xx);
+ for XEA1, where there is no PS.EXCM, this is always 1;
+ interrupts at levels FIRST_HIGHPRI <= n <= EXCM_LEVEL, if any,
+ are termed "medium priority" interrupts (post T10xx only) */
+/* Note: 1 <= LOWPRI_LEVELS <= EXCM_LEVEL < DEBUGLEVEL <= NUM_INTLEVELS < NMILEVEL <= 15 */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL0_MASK 0x00000000
+#define XCHAL_INTLEVEL1_MASK 0x000064F9
+#define XCHAL_INTLEVEL2_MASK 0x00008902
+#define XCHAL_INTLEVEL3_MASK 0x00011204
+#define XCHAL_INTLEVEL4_MASK 0x00000000
+#define XCHAL_INTLEVEL5_MASK 0x00000000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00000000
+#define XCHAL_INTLEVEL8_MASK 0x00000000
+#define XCHAL_INTLEVEL9_MASK 0x00000000
+#define XCHAL_INTLEVEL10_MASK 0x00000000
+#define XCHAL_INTLEVEL11_MASK 0x00000000
+#define XCHAL_INTLEVEL12_MASK 0x00000000
+#define XCHAL_INTLEVEL13_MASK 0x00000000
+#define XCHAL_INTLEVEL14_MASK 0x00000000
+#define XCHAL_INTLEVEL15_MASK 0x00000000
+/* As an array of entries (eg. for C constant arrays): */
+#define XCHAL_INTLEVEL_MASKS 0x00000000 XCHAL_SEP \
+ 0x000064F9 XCHAL_SEP \
+ 0x00008902 XCHAL_SEP \
+ 0x00011204 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL0_ANDBELOW_MASK 0x00000000
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x000064F9
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x0000EDFB
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL8_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL9_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL10_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL11_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL12_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL13_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL14_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL15_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_LOWPRI_MASK XCHAL_INTLEVEL1_ANDBELOW_MASK /* mask of all low-priority interrupts */
+#define XCHAL_EXCM_MASK XCHAL_INTLEVEL1_ANDBELOW_MASK /* mask of all interrupts masked by PS.EXCM (or CEXCM) */
+/* As an array of entries (eg. for C constant arrays): */
+#define XCHAL_INTLEVEL_ANDBELOW_MASKS 0x00000000 XCHAL_SEP \
+ 0x000064F9 XCHAL_SEP \
+ 0x0000EDFB XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF
+
+/* Interrupt numbers for each interrupt level at which only one interrupt was configured: */
+/*#define XCHAL_INTLEVEL1_NUM ...more than one interrupt at this level...*/
+/*#define XCHAL_INTLEVEL2_NUM ...more than one interrupt at this level...*/
+/*#define XCHAL_INTLEVEL3_NUM ...more than one interrupt at this level...*/
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 2
+#define XCHAL_INT2_LEVEL 3
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 1
+#define XCHAL_INT11_LEVEL 2
+#define XCHAL_INT12_LEVEL 3
+#define XCHAL_INT13_LEVEL 1
+#define XCHAL_INT14_LEVEL 1
+#define XCHAL_INT15_LEVEL 2
+#define XCHAL_INT16_LEVEL 3
+#define XCHAL_INT17_LEVEL 0
+#define XCHAL_INT18_LEVEL 0
+#define XCHAL_INT19_LEVEL 0
+#define XCHAL_INT20_LEVEL 0
+#define XCHAL_INT21_LEVEL 0
+#define XCHAL_INT22_LEVEL 0
+#define XCHAL_INT23_LEVEL 0
+#define XCHAL_INT24_LEVEL 0
+#define XCHAL_INT25_LEVEL 0
+#define XCHAL_INT26_LEVEL 0
+#define XCHAL_INT27_LEVEL 0
+#define XCHAL_INT28_LEVEL 0
+#define XCHAL_INT29_LEVEL 0
+#define XCHAL_INT30_LEVEL 0
+#define XCHAL_INT31_LEVEL 0
+/* As an array of entries (eg. for C constant arrays): */
+#define XCHAL_INT_LEVELS 1 XCHAL_SEP \
+ 2 XCHAL_SEP \
+ 3 XCHAL_SEP \
+ 1 XCHAL_SEP \
+ 1 XCHAL_SEP \
+ 1 XCHAL_SEP \
+ 1 XCHAL_SEP \
+ 1 XCHAL_SEP \
+ 2 XCHAL_SEP \
+ 3 XCHAL_SEP \
+ 1 XCHAL_SEP \
+ 2 XCHAL_SEP \
+ 3 XCHAL_SEP \
+ 1 XCHAL_SEP \
+ 1 XCHAL_SEP \
+ 2 XCHAL_SEP \
+ 3 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT22_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT23_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT24_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT25_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT26_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT27_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT28_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT29_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT30_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT31_TYPE XTHAL_INTTYPE_UNCONFIGURED
+/* As an array of entries (eg. for C constant arrays): */
+#define XCHAL_INT_TYPES XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_EDGE XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_EDGE XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_EDGE XCHAL_SEP \
+ XTHAL_INTTYPE_TIMER XCHAL_SEP \
+ XTHAL_INTTYPE_TIMER XCHAL_SEP \
+ XTHAL_INTTYPE_TIMER XCHAL_SEP \
+ XTHAL_INTTYPE_SOFTWARE XCHAL_SEP \
+ XTHAL_INTTYPE_SOFTWARE XCHAL_SEP \
+ XTHAL_INTTYPE_SOFTWARE XCHAL_SEP \
+ XTHAL_INTTYPE_SOFTWARE XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFFE0000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x0001E000
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x00000380
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000007F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00001C00
+#define XCHAL_INTTYPE_MASK_NMI 0x00000000
+/* As an array of entries (eg. for C constant arrays): */
+#define XCHAL_INTTYPE_MASKS 0xFFFE0000 XCHAL_SEP \
+ 0x0001E000 XCHAL_SEP \
+ 0x00000380 XCHAL_SEP \
+ 0x0000007F XCHAL_SEP \
+ 0x00001C00 XCHAL_SEP \
+ 0x00000000
+
+/* Interrupts assigned to each timer (CCOMPARE0 to CCOMPARE3), -1 if unassigned */
+#define XCHAL_TIMER0_INTERRUPT 10
+#define XCHAL_TIMER1_INTERRUPT 11
+#define XCHAL_TIMER2_INTERRUPT 12
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+/* As an array of entries (eg. for C constant arrays): */
+#define XCHAL_TIMER_INTERRUPTS 10 XCHAL_SEP \
+ 11 XCHAL_SEP \
+ 12 XCHAL_SEP \
+ XTHAL_TIMER_UNCONFIGURED
+
+/* Indexing macros: */
+#define _XCHAL_INTLEVEL_MASK(n) XCHAL_INTLEVEL ## n ## _MASK
+#define XCHAL_INTLEVEL_MASK(n) _XCHAL_INTLEVEL_MASK(n) /* n = 0 .. 15 */
+#define _XCHAL_INTLEVEL_ANDBELOWMASK(n) XCHAL_INTLEVEL ## n ## _ANDBELOW_MASK
+#define XCHAL_INTLEVEL_ANDBELOW_MASK(n) _XCHAL_INTLEVEL_ANDBELOWMASK(n) /* n = 0 .. 15 */
+#define _XCHAL_INT_LEVEL(n) XCHAL_INT ## n ## _LEVEL
+#define XCHAL_INT_LEVEL(n) _XCHAL_INT_LEVEL(n) /* n = 0 .. 31 */
+#define _XCHAL_INT_TYPE(n) XCHAL_INT ## n ## _TYPE
+#define XCHAL_INT_TYPE(n) _XCHAL_INT_TYPE(n) /* n = 0 .. 31 */
+#define _XCHAL_TIMER_INTERRUPT(n) XCHAL_TIMER ## n ## _INTERRUPT
+#define XCHAL_TIMER_INTERRUPT(n) _XCHAL_TIMER_INTERRUPT(n) /* n = 0 .. 3 */
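+
+/*
+ * For example (hypothetical use), XCHAL_TIMER_INTERRUPT(0) expands via
+ * XCHAL_TIMER0_INTERRUPT to 10, the interrupt tied to CCOMPARE0 in this
+ * configuration, and XCHAL_INT_LEVEL(10) likewise expands to 1.
+ */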
+
+
+
+/*
+ * External interrupt vectors/levels.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 2) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 3) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 6 /* (intlevel 1) */
+#define XCHAL_EXTINT7_NUM 7 /* (intlevel 1) */
+#define XCHAL_EXTINT8_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT9_NUM 9 /* (intlevel 3) */
+
+/* Corresponding interrupt masks: */
+#define XCHAL_EXTINT0_MASK 0x00000001
+#define XCHAL_EXTINT1_MASK 0x00000002
+#define XCHAL_EXTINT2_MASK 0x00000004
+#define XCHAL_EXTINT3_MASK 0x00000008
+#define XCHAL_EXTINT4_MASK 0x00000010
+#define XCHAL_EXTINT5_MASK 0x00000020
+#define XCHAL_EXTINT6_MASK 0x00000040
+#define XCHAL_EXTINT7_MASK 0x00000080
+#define XCHAL_EXTINT8_MASK 0x00000100
+#define XCHAL_EXTINT9_MASK 0x00000200
+
+/* Core config interrupt levels mapped to each external interrupt: */
+#define XCHAL_EXTINT0_LEVEL 1 /* (int number 0) */
+#define XCHAL_EXTINT1_LEVEL 2 /* (int number 1) */
+#define XCHAL_EXTINT2_LEVEL 3 /* (int number 2) */
+#define XCHAL_EXTINT3_LEVEL 1 /* (int number 3) */
+#define XCHAL_EXTINT4_LEVEL 1 /* (int number 4) */
+#define XCHAL_EXTINT5_LEVEL 1 /* (int number 5) */
+#define XCHAL_EXTINT6_LEVEL 1 /* (int number 6) */
+#define XCHAL_EXTINT7_LEVEL 1 /* (int number 7) */
+#define XCHAL_EXTINT8_LEVEL 2 /* (int number 8) */
+#define XCHAL_EXTINT9_LEVEL 3 /* (int number 9) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_EXCEPTIONS 1 /* 1 if exception option configured, 0 otherwise */
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture number: 1 for XEA1 (old), 2 for XEA2 (new) */
+#define XCHAL_HAVE_XEA1 0 /* 1 if XEA1, 0 otherwise */
+#define XCHAL_HAVE_XEA2 1 /* 1 if XEA2, 0 otherwise */
+/* For backward compatibility ONLY -- DO NOT USE (will be removed in future release): */
+#define XCHAL_HAVE_OLD_EXC_ARCH XCHAL_HAVE_XEA1 /* (DEPRECATED) 1 if old exception architecture (XEA1), 0 otherwise (eg. XEA2) */
+#define XCHAL_HAVE_EXCM XCHAL_HAVE_XEA2 /* (DEPRECATED) 1 if PS.EXCM bit exists (currently equals XCHAL_HAVE_TLBS) */
+
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000020
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000020
+#define XCHAL_USER_VECTOR_VADDR 0xD0000220
+#define XCHAL_PROGRAMEXC_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR /* for backward compatibility */
+#define XCHAL_USEREXC_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR /* for backward compatibility */
+#define XCHAL_USER_VECTOR_PADDR 0x00000220
+#define XCHAL_PROGRAMEXC_VECTOR_PADDR XCHAL_USER_VECTOR_PADDR /* for backward compatibility */
+#define XCHAL_USEREXC_VECTOR_PADDR XCHAL_USER_VECTOR_PADDR /* for backward compatibility */
+#define XCHAL_KERNEL_VECTOR_VADDR 0xD0000200
+#define XCHAL_STACKEDEXC_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR /* for backward compatibility */
+#define XCHAL_KERNELEXC_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR /* for backward compatibility */
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00000200
+#define XCHAL_STACKEDEXC_VECTOR_PADDR XCHAL_KERNEL_VECTOR_PADDR /* for backward compatibility */
+#define XCHAL_KERNELEXC_VECTOR_PADDR XCHAL_KERNEL_VECTOR_PADDR /* for backward compatibility */
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0xD0000290
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x00000290
+#define XCHAL_WINDOW_VECTORS_VADDR 0xD0000000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00000000
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0xD0000240
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00000240
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0xD0000250
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x00000250
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0xFE000520
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0xFE000520
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL4_VECTOR_PADDR
+
+/* Indexing macros: */
+#define _XCHAL_INTLEVEL_VECTOR_VADDR(n) XCHAL_INTLEVEL ## n ## _VECTOR_VADDR
+#define XCHAL_INTLEVEL_VECTOR_VADDR(n) _XCHAL_INTLEVEL_VECTOR_VADDR(n) /* n = 0 .. 15 */
+
+/*
+ * General Exception Causes
+ * (values of EXCCAUSE special register set by general exceptions,
+ * which vector to the user, kernel, or double-exception vectors):
+ */
+#define XCHAL_EXCCAUSE_ILLEGAL_INSTRUCTION 0 /* Illegal Instruction (IllegalInstruction) */
+#define XCHAL_EXCCAUSE_SYSTEM_CALL 1 /* System Call (SystemCall) */
+#define XCHAL_EXCCAUSE_INSTRUCTION_FETCH_ERROR 2 /* Instruction Fetch Error (InstructionFetchError) */
+#define XCHAL_EXCCAUSE_LOAD_STORE_ERROR 3 /* Load Store Error (LoadStoreError) */
+#define XCHAL_EXCCAUSE_LEVEL1_INTERRUPT 4 /* Level 1 Interrupt (Level1Interrupt) */
+#define XCHAL_EXCCAUSE_ALLOCA 5 /* Stack Extension Assist (Alloca) */
+#define XCHAL_EXCCAUSE_INTEGER_DIVIDE_BY_ZERO 6 /* Integer Divide by Zero (IntegerDivideByZero) */
+#define XCHAL_EXCCAUSE_SPECULATION 7 /* Speculation (Speculation) */
+#define XCHAL_EXCCAUSE_PRIVILEGED 8 /* Privileged Instruction (Privileged) */
+#define XCHAL_EXCCAUSE_UNALIGNED 9 /* Unaligned Load Store (Unaligned) */
+#define XCHAL_EXCCAUSE_ITLB_MISS 16 /* ITlb Miss Exception (ITlbMiss) */
+#define XCHAL_EXCCAUSE_ITLB_MULTIHIT 17 /* ITlb Multihit Exception (ITlbMultihit) */
+#define XCHAL_EXCCAUSE_ITLB_PRIVILEGE 18 /* ITlb Privilege Exception (ITlbPrivilege) */
+#define XCHAL_EXCCAUSE_ITLB_SIZE_RESTRICTION 19 /* ITlb Size Restriction Exception (ITlbSizeRestriction) */
+#define XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE 20 /* Fetch Cache Attribute Exception (FetchCacheAttribute) */
+#define XCHAL_EXCCAUSE_DTLB_MISS 24 /* DTlb Miss Exception (DTlbMiss) */
+#define XCHAL_EXCCAUSE_DTLB_MULTIHIT 25 /* DTlb Multihit Exception (DTlbMultihit) */
+#define XCHAL_EXCCAUSE_DTLB_PRIVILEGE 26 /* DTlb Privilege Exception (DTlbPrivilege) */
+#define XCHAL_EXCCAUSE_DTLB_SIZE_RESTRICTION 27 /* DTlb Size Restriction Exception (DTlbSizeRestriction) */
+#define XCHAL_EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28 /* Load Cache Attribute Exception (LoadCacheAttribute) */
+#define XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE 29 /* Store Cache Attribute Exception (StoreCacheAttribute) */
+#define XCHAL_EXCCAUSE_FLOATING_POINT 40 /* Floating Point Exception (FloatingPoint) */
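+/*
+ * Illustrative sketch (not part of the generated configuration; the helper
+ * name and signature are hypothetical):  a handler that has already read
+ * EXCCAUSE into a C variable can dispatch on the values above, e.g.:
+ */
+#ifndef __ASSEMBLY__
+static __inline__ int xchal_exccause_is_tlb_miss_example(unsigned cause)
+{
+	return cause == XCHAL_EXCCAUSE_ITLB_MISS
+	    || cause == XCHAL_EXCCAUSE_DTLB_MISS;
+}
+#endif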
+
+
+
+/*----------------------------------------------------------------------
+ TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_CCOUNT 1 /* 1 if have CCOUNT, 0 otherwise */
+/*#define XCHAL_HAVE_TIMERS XCHAL_HAVE_CCOUNT*/
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_DEBUG 1 /* 1 if debug option configured, 0 otherwise */
+#define XCHAL_HAVE_OCD 1 /* 1 if OnChipDebug option configured, 0 otherwise */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_DEBUGLEVEL 4 /* debug interrupt level */
+/*DebugExternalInterrupt 0 0|1*/
+/*DebugUseDIRArray 0 0|1*/
+
+
+
+
+/*----------------------------------------------------------------------
+ COPROCESSORS and EXTRA STATE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_CP 0 /* 1 if coprocessor option configured (CPENABLE present) */
+#define XCHAL_CP_MAXCFG 0 /* max allowed cp id plus one (per cfg) */
+
+#include <xtensa/config/tie.h>
+
+
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instruction ROMs configured */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instruction RAMs configured */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs configured */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs configured */
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports configured */
+#define XCHAL_NUM_IROM XCHAL_NUM_INSTROM /* (DEPRECATED) */
+#define XCHAL_NUM_IRAM XCHAL_NUM_INSTRAM /* (DEPRECATED) */
+#define XCHAL_NUM_DROM XCHAL_NUM_DATAROM /* (DEPRECATED) */
+#define XCHAL_NUM_DRAM XCHAL_NUM_DATARAM /* (DEPRECATED) */
+
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+/* Size of the cache lines in log2(bytes): */
+#define XCHAL_ICACHE_LINEWIDTH 4
+#define XCHAL_DCACHE_LINEWIDTH 4
+/* Size of the cache lines in bytes: */
+#define XCHAL_ICACHE_LINESIZE 16
+#define XCHAL_DCACHE_LINESIZE 16
+/* Max for both I-cache and D-cache (used for general alignment): */
+#define XCHAL_CACHE_LINEWIDTH_MAX 4
+#define XCHAL_CACHE_LINESIZE_MAX 16
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 8
+#define XCHAL_DCACHE_SETWIDTH 8
+/* Max for both I-cache and D-cache (used for general cache-coherency page alignment): */
+#define XCHAL_CACHE_SETWIDTH_MAX 8
+#define XCHAL_CACHE_SETSIZE_MAX 256
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 2
+#define XCHAL_DCACHE_WAYS 2
+
+/* Size of the caches in bytes (ways * 2^(linewidth + setwidth)): */
+#define XCHAL_ICACHE_SIZE 8192
+#define XCHAL_DCACHE_SIZE 8192
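+/*
+ * Illustrative consistency check (sketch, not generated):  the sizes above
+ * follow from ways * 2^(linewidth + setwidth); for this configuration that
+ * is 2 * 2^(4+8) = 8192 bytes for each cache.
+ */
+#if XCHAL_ICACHE_SIZE != (XCHAL_ICACHE_WAYS << (XCHAL_ICACHE_LINEWIDTH + XCHAL_ICACHE_SETWIDTH))
+# error "XCHAL_ICACHE_SIZE inconsistent with ways/linewidth/setwidth"
+#endif
+#if XCHAL_DCACHE_SIZE != (XCHAL_DCACHE_WAYS << (XCHAL_DCACHE_LINEWIDTH + XCHAL_DCACHE_SETWIDTH))
+# error "XCHAL_DCACHE_SIZE inconsistent with ways/linewidth/setwidth"
+#endif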
+
+/* Cache features: */
+#define XCHAL_DCACHE_IS_WRITEBACK 0
+/* Whether cache locking feature is available: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 0
+#define XCHAL_DCACHE_LINE_LOCKABLE 0
+
+/* Number of (encoded) cache attribute bits: */
+#define XCHAL_CA_BITS 4 /* number of bits needed to hold cache attribute encoding */
+/* (The number of access mode bits (decoded cache attribute bits) is defined by the architecture; see xtensa/hal.h?) */
+
+
+/* Cache Attribute encodings -- lists of access modes for each cache attribute: */
+#define XCHAL_FCA_LIST XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_BYPASS XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_BYPASS XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_CACHED XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_CACHED XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_CACHED XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_CACHED XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION
+#define XCHAL_LCA_LIST XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_BYPASSG XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_BYPASSG XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_CACHED XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_CACHED XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_NACACHED XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_NACACHED XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_ISOLATE XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_CACHED
+#define XCHAL_SCA_LIST XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_BYPASS XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_WRITETHRU XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_WRITETHRU XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_ISOLATE XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_WRITETHRU
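+/*
+ * Illustrative sketch (assumption, not generated):  XCHAL_SEP-separated
+ * lists such as XCHAL_FCA_LIST are typically consumed by defining
+ * XCHAL_SEP as a comma and expanding the list into a lookup table, e.g.:
+ *
+ *	#define XCHAL_SEP ,
+ *	static const unsigned char fca_modes[16] = { XCHAL_FCA_LIST };
+ *	#undef XCHAL_SEP
+ *
+ * (table name hypothetical); entry i then gives the fetch access mode for
+ * cache attribute encoding i.
+ */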
+
+/* Test:
+ read/only: 0 + 1 + 2 + 4 + 5 + 6 + 8 + 9 + 10 + 12 + 14
+ all: 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15
+ fault: 0 + 2 + 4 + 6 + 8 + 10 + 12 + 14
+ r/w/x cached:
+ r/w/x dcached:
+ I-bypass: 1 + 3
+
+ load guard bit set: 1 + 3
+ load guard bit clr: 0 + 2 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15
+ hit-cache r/w/x: 7 + 11
+
+ fams: 5
+ fams: 0 / 6 / 18 / 1 / 2
+ fams: Bypass / Isolate / Cached / Exception / NACached
+
+ MMU okay: yes
+*/
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/*
+ * General notes on MMU parameters.
+ *
+ * Terminology:
+ * ASID = address-space ID (acts as an "extension" of virtual addresses)
+ * VPN = virtual page number
+ * PPN = physical page number
+ * CA = encoded cache attribute (access modes)
+ * TLB = translation look-aside buffer (term is stretched somewhat here)
+ * I = instruction (fetch accesses)
+ * D = data (load and store accesses)
+ * way = each TLB (ITLB and DTLB) consists of a number of "ways"
+ * that simultaneously match the virtual address of an access;
+ * a TLB successfully translates a virtual address if exactly
+ * one way matches the vaddr; if none match, it is a miss;
+ * if multiple match, one gets a "multihit" exception;
+ * each way can be independently configured in terms of number of
+ * entries, page sizes, which fields are writable or constant, etc.
+ * set = group of contiguous ways with exactly identical parameters
+ * ARF = auto-refill; hardware services a 1st-level miss by loading a PTE
+ * from the page table and storing it in one of the auto-refill ways;
+ * if this PTE load also misses, a miss exception is posted for s/w.
+ * min-wired = a "min-wired" way can be used to map a single (minimum-sized)
+ * page arbitrarily under program control; it has a single entry,
+ * is non-auto-refill (some other way(s) must be auto-refill),
+ *		its fields (VPN, PPN, ASID, CA) are all writable, and it
+ * supports the XCHAL_MMU_MIN_PTE_PAGE_SIZE page size (a current
+ * restriction is that this be the only page size it supports).
+ *
+ * TLB way entries are virtually indexed.
+ * TLB ways that support multiple page sizes:
+ * - must have all writable VPN and PPN fields;
+ * - can only use one page size at any given time (eg. setup at startup),
+ * selected by the respective ITLBCFG or DTLBCFG special register,
+ * whose bits n*4+3 .. n*4 index the list of page sizes for way n
+ * (XCHAL_xTLB_SETm_PAGESZ_LOG2_LIST for set m corresponding to way n);
+ * this list may be sparse for auto-refill ways because auto-refill
+ * ways have independent lists of supported page sizes sharing a
+ * common encoding with PTE entries; the encoding is the index into
+ * this list; unsupported sizes for a given way are zero in the list;
+ * selecting unsupported sizes results in undefined hardware behaviour;
+ * - is only possible for ways 0 thru 7 (due to ITLBCFG/DTLBCFG definition).
+ */
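+/*
+ * Illustrative example (not generated):  per the note above, the page-size
+ * index for way n occupies bits n*4+3 .. n*4 of ITLBCFG/DTLBCFG.  For this
+ * configuration, selecting the 4 MB page size (index 1 in
+ * XCHAL_DTLB_SET1_PAGESZ_LOG2_LIST) for DTLB way 4 would correspond to a
+ * DTLBCFG value of (1 << (4*4)) == 0x00010000; actually writing the
+ * register is OS/boot-code specific and not shown here.
+ */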
+
+#define XCHAL_HAVE_CACHEATTR 0 /* 1 if CACHEATTR register present, 0 if TLBs present instead */
+#define XCHAL_HAVE_TLBS 1 /* 1 if TLBs present, 0 if CACHEATTR present instead */
+#define XCHAL_HAVE_MMU XCHAL_HAVE_TLBS /* (DEPRECATED; use XCHAL_HAVE_TLBS instead; will be removed in future release) */
+#define XCHAL_HAVE_SPANNING_WAY 0 /* 1 if single way maps entire virtual address space in I+D */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* 1 if virtual addr == physical addr always, 0 otherwise */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* 1 if have MMU that mimics a CACHEATTR config (CaMMU) */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* 1 if have MMU that mimics a CACHEATTR config, but with translation (CaXltMMU) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs (address space IDs) */
+#define XCHAL_MMU_ASID_INVALID 0 /* ASID value indicating invalid address space */
+#define XCHAL_MMU_ASID_KERNEL 1 /* ASID value indicating kernel (ring 0) address space */
+#define XCHAL_MMU_RINGS 4 /* number of rings supported (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* number of bits needed to hold ring number */
+#define XCHAL_MMU_SR_BITS 0 /* number of size-restriction bits supported */
+#define XCHAL_MMU_CA_BITS 4 /* number of bits needed to hold cache attribute encoding */
+#define XCHAL_MMU_MAX_PTE_PAGE_SIZE 12 /* max page size in a PTE structure (log2) */
+#define XCHAL_MMU_MIN_PTE_PAGE_SIZE 12 /* min page size in a PTE structure (log2) */
+
+
+/*** Instruction TLB: ***/
+
+#define XCHAL_ITLB_WAY_BITS 3 /* number of bits holding the ways */
+#define XCHAL_ITLB_WAYS 7 /* number of ways (n-way set-associative TLB) */
+#define XCHAL_ITLB_ARF_WAYS 4 /* number of auto-refill ways */
+#define XCHAL_ITLB_SETS 4 /* number of sets (groups of ways with identical settings) */
+
+/* Way set to which each way belongs: */
+#define XCHAL_ITLB_WAY0_SET 0
+#define XCHAL_ITLB_WAY1_SET 0
+#define XCHAL_ITLB_WAY2_SET 0
+#define XCHAL_ITLB_WAY3_SET 0
+#define XCHAL_ITLB_WAY4_SET 1
+#define XCHAL_ITLB_WAY5_SET 2
+#define XCHAL_ITLB_WAY6_SET 3
+
+/* Way sets that are used by hardware auto-refill (ARF): */
+#define XCHAL_ITLB_ARF_SETS 1 /* number of auto-refill sets */
+#define XCHAL_ITLB_ARF_SET0 0 /* index of n'th auto-refill set */
+
+/* Way sets that are "min-wired" (see terminology comment above): */
+#define XCHAL_ITLB_MINWIRED_SETS 0 /* number of "min-wired" sets */
+
+
+/* ITLB way set 0 (group of ways 0 thru 3): */
+#define XCHAL_ITLB_SET0_WAY 0 /* index of first way in this way set */
+#define XCHAL_ITLB_SET0_WAYS 4 /* number of (contiguous) ways in this way set */
+#define XCHAL_ITLB_SET0_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_ITLB_SET0_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_ITLB_SET0_ARF 1 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_ITLB_SET0_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_ITLB_SET0_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_ITLB_SET0_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
+#define XCHAL_ITLB_SET0_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
+#define XCHAL_ITLB_SET0_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_ITLB_SET0_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_ITLB_SET0_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET0_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET0_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_ITLB_SET0_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET0_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET0_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET0_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* ITLB way set 1 (group of ways 4 thru 4): */
+#define XCHAL_ITLB_SET1_WAY 4 /* index of first way in this way set */
+#define XCHAL_ITLB_SET1_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_ITLB_SET1_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_ITLB_SET1_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_ITLB_SET1_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_ITLB_SET1_PAGESIZES 4 /* number of supported page sizes in this way */
+#define XCHAL_ITLB_SET1_PAGESZ_BITS 2 /* number of bits to encode the page size */
+#define XCHAL_ITLB_SET1_PAGESZ_LOG2_MIN 20 /* log2(minimum supported page size) */
+#define XCHAL_ITLB_SET1_PAGESZ_LOG2_MAX 26 /* log2(maximum supported page size) */
+#define XCHAL_ITLB_SET1_PAGESZ_LOG2_LIST 20 XCHAL_SEP 22 XCHAL_SEP 24 XCHAL_SEP 26 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_ITLB_SET1_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_ITLB_SET1_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET1_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET1_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_ITLB_SET1_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET1_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET1_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET1_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* ITLB way set 2 (group of ways 5 thru 5): */
+#define XCHAL_ITLB_SET2_WAY 5 /* index of first way in this way set */
+#define XCHAL_ITLB_SET2_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_ITLB_SET2_ENTRIES_LOG2 1 /* log2(number of entries in this way) */
+#define XCHAL_ITLB_SET2_ENTRIES 2 /* number of entries in this way (always a power of 2) */
+#define XCHAL_ITLB_SET2_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_ITLB_SET2_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_ITLB_SET2_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_ITLB_SET2_PAGESZ_LOG2_MIN 27 /* log2(minimum supported page size) */
+#define XCHAL_ITLB_SET2_PAGESZ_LOG2_MAX 27 /* log2(maximum supported page size) */
+#define XCHAL_ITLB_SET2_PAGESZ_LOG2_LIST 27 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_ITLB_SET2_ASID_CONSTMASK 0xFF /* constant ASID bits; 0 if all writable */
+#define XCHAL_ITLB_SET2_VPN_CONSTMASK 0xF0000000 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET2_PPN_CONSTMASK 0xF8000000 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET2_CA_CONSTMASK 0x0000000F /* constant CA bits; 0 if all writable */
+#define XCHAL_ITLB_SET2_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET2_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET2_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET2_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+/* Constant ASID values for each entry of ITLB way set 2 (because ASID_CONSTMASK is non-zero): */
+#define XCHAL_ITLB_SET2_E0_ASID_CONST 0x01
+#define XCHAL_ITLB_SET2_E1_ASID_CONST 0x01
+/* Constant VPN values for each entry of ITLB way set 2 (because VPN_CONSTMASK is non-zero): */
+#define XCHAL_ITLB_SET2_E0_VPN_CONST 0xD0000000
+#define XCHAL_ITLB_SET2_E1_VPN_CONST 0xD8000000
+/* Constant PPN values for each entry of ITLB way set 2 (because PPN_CONSTMASK is non-zero): */
+#define XCHAL_ITLB_SET2_E0_PPN_CONST 0x00000000
+#define XCHAL_ITLB_SET2_E1_PPN_CONST 0x00000000
+/* Constant CA values for each entry of ITLB way set 2 (because CA_CONSTMASK is non-zero): */
+#define XCHAL_ITLB_SET2_E0_CA_CONST 0x07
+#define XCHAL_ITLB_SET2_E1_CA_CONST 0x03
+
+/* ITLB way set 3 (group of ways 6 thru 6): */
+#define XCHAL_ITLB_SET3_WAY 6 /* index of first way in this way set */
+#define XCHAL_ITLB_SET3_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_ITLB_SET3_ENTRIES_LOG2 1 /* log2(number of entries in this way) */
+#define XCHAL_ITLB_SET3_ENTRIES 2 /* number of entries in this way (always a power of 2) */
+#define XCHAL_ITLB_SET3_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_ITLB_SET3_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_ITLB_SET3_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_ITLB_SET3_PAGESZ_LOG2_MIN 28 /* log2(minimum supported page size) */
+#define XCHAL_ITLB_SET3_PAGESZ_LOG2_MAX 28 /* log2(maximum supported page size) */
+#define XCHAL_ITLB_SET3_PAGESZ_LOG2_LIST 28 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_ITLB_SET3_ASID_CONSTMASK 0xFF /* constant ASID bits; 0 if all writable */
+#define XCHAL_ITLB_SET3_VPN_CONSTMASK 0xE0000000 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET3_PPN_CONSTMASK 0xF0000000 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET3_CA_CONSTMASK 0x0000000F /* constant CA bits; 0 if all writable */
+#define XCHAL_ITLB_SET3_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET3_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET3_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET3_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+/* Constant ASID values for each entry of ITLB way set 3 (because ASID_CONSTMASK is non-zero): */
+#define XCHAL_ITLB_SET3_E0_ASID_CONST 0x01
+#define XCHAL_ITLB_SET3_E1_ASID_CONST 0x01
+/* Constant VPN values for each entry of ITLB way set 3 (because VPN_CONSTMASK is non-zero): */
+#define XCHAL_ITLB_SET3_E0_VPN_CONST 0xE0000000
+#define XCHAL_ITLB_SET3_E1_VPN_CONST 0xF0000000
+/* Constant PPN values for each entry of ITLB way set 3 (because PPN_CONSTMASK is non-zero): */
+#define XCHAL_ITLB_SET3_E0_PPN_CONST 0xF0000000
+#define XCHAL_ITLB_SET3_E1_PPN_CONST 0xF0000000
+/* Constant CA values for each entry of ITLB way set 3 (because CA_CONSTMASK is non-zero): */
+#define XCHAL_ITLB_SET3_E0_CA_CONST 0x07
+#define XCHAL_ITLB_SET3_E1_CA_CONST 0x03
+
+/* Indexing macros: */
+#define _XCHAL_ITLB_SET(n,_what) XCHAL_ITLB_SET ## n ## _what
+#define XCHAL_ITLB_SET(n,what) _XCHAL_ITLB_SET(n, _ ## what )
+#define _XCHAL_ITLB_SET_E(n,i,_what) XCHAL_ITLB_SET ## n ## _E ## i ## _what
+#define XCHAL_ITLB_SET_E(n,i,what) _XCHAL_ITLB_SET_E(n,i, _ ## what )
+/*
+ * Example use: XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,ENTRIES)
+ * to get the value of XCHAL_ITLB_SET<n>_ENTRIES where <n> is the first auto-refill set.
+ */
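+/*
+ * Illustrative sketch (hypothetical macro name, not generated):  the
+ * indexing macros turn set-relative parameters into plain constants, e.g.
+ * the entries per way of the first auto-refill set:
+ */
+#define XCHAL_ITLB_ARF_ENTRIES_EXAMPLE	XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,ENTRIES)	/* == 4 here */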
+
+
+/*** Data TLB: ***/
+
+#define XCHAL_DTLB_WAY_BITS 4 /* number of bits holding the ways */
+#define XCHAL_DTLB_WAYS 10 /* number of ways (n-way set-associative TLB) */
+#define XCHAL_DTLB_ARF_WAYS 4 /* number of auto-refill ways */
+#define XCHAL_DTLB_SETS 5 /* number of sets (groups of ways with identical settings) */
+
+/* Way set to which each way belongs: */
+#define XCHAL_DTLB_WAY0_SET 0
+#define XCHAL_DTLB_WAY1_SET 0
+#define XCHAL_DTLB_WAY2_SET 0
+#define XCHAL_DTLB_WAY3_SET 0
+#define XCHAL_DTLB_WAY4_SET 1
+#define XCHAL_DTLB_WAY5_SET 2
+#define XCHAL_DTLB_WAY6_SET 3
+#define XCHAL_DTLB_WAY7_SET 4
+#define XCHAL_DTLB_WAY8_SET 4
+#define XCHAL_DTLB_WAY9_SET 4
+
+/* Way sets that are used by hardware auto-refill (ARF): */
+#define XCHAL_DTLB_ARF_SETS 1 /* number of auto-refill sets */
+#define XCHAL_DTLB_ARF_SET0 0 /* index of n'th auto-refill set */
+
+/* Way sets that are "min-wired" (see terminology comment above): */
+#define XCHAL_DTLB_MINWIRED_SETS 1 /* number of "min-wired" sets */
+#define XCHAL_DTLB_MINWIRED_SET0 4 /* index of n'th "min-wired" set */
+
+
+/* DTLB way set 0 (group of ways 0 thru 3): */
+#define XCHAL_DTLB_SET0_WAY 0 /* index of first way in this way set */
+#define XCHAL_DTLB_SET0_WAYS 4 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET0_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET0_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET0_ARF 1 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET0_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET0_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET0_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET0_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET0_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET0_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET0_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET0_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET0_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET0_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET0_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET0_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET0_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* DTLB way set 1 (group of ways 4 thru 4): */
+#define XCHAL_DTLB_SET1_WAY 4 /* index of first way in this way set */
+#define XCHAL_DTLB_SET1_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET1_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET1_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET1_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET1_PAGESIZES 4 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET1_PAGESZ_BITS 2 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET1_PAGESZ_LOG2_MIN 20 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET1_PAGESZ_LOG2_MAX 26 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET1_PAGESZ_LOG2_LIST 20 XCHAL_SEP 22 XCHAL_SEP 24 XCHAL_SEP 26 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET1_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET1_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET1_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET1_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET1_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET1_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET1_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET1_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* DTLB way set 2 (group of ways 5 thru 5): */
+#define XCHAL_DTLB_SET2_WAY 5 /* index of first way in this way set */
+#define XCHAL_DTLB_SET2_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET2_ENTRIES_LOG2 1 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET2_ENTRIES 2 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET2_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET2_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET2_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET2_PAGESZ_LOG2_MIN 27 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET2_PAGESZ_LOG2_MAX 27 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET2_PAGESZ_LOG2_LIST 27 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET2_ASID_CONSTMASK 0xFF /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET2_VPN_CONSTMASK 0xF0000000 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET2_PPN_CONSTMASK 0xF8000000 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET2_CA_CONSTMASK 0x0000000F /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET2_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET2_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET2_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET2_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+/* Constant ASID values for each entry of DTLB way set 2 (because ASID_CONSTMASK is non-zero): */
+#define XCHAL_DTLB_SET2_E0_ASID_CONST 0x01
+#define XCHAL_DTLB_SET2_E1_ASID_CONST 0x01
+/* Constant VPN values for each entry of DTLB way set 2 (because VPN_CONSTMASK is non-zero): */
+#define XCHAL_DTLB_SET2_E0_VPN_CONST 0xD0000000
+#define XCHAL_DTLB_SET2_E1_VPN_CONST 0xD8000000
+/* Constant PPN values for each entry of DTLB way set 2 (because PPN_CONSTMASK is non-zero): */
+#define XCHAL_DTLB_SET2_E0_PPN_CONST 0x00000000
+#define XCHAL_DTLB_SET2_E1_PPN_CONST 0x00000000
+/* Constant CA values for each entry of DTLB way set 2 (because CA_CONSTMASK is non-zero): */
+#define XCHAL_DTLB_SET2_E0_CA_CONST 0x07
+#define XCHAL_DTLB_SET2_E1_CA_CONST 0x03
+
+/* DTLB way set 3 (group of ways 6 thru 6): */
+#define XCHAL_DTLB_SET3_WAY 6 /* index of first way in this way set */
+#define XCHAL_DTLB_SET3_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET3_ENTRIES_LOG2 1 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET3_ENTRIES 2 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET3_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET3_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET3_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET3_PAGESZ_LOG2_MIN 28 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET3_PAGESZ_LOG2_MAX 28 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET3_PAGESZ_LOG2_LIST 28 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET3_ASID_CONSTMASK 0xFF /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET3_VPN_CONSTMASK 0xE0000000 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET3_PPN_CONSTMASK 0xF0000000 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET3_CA_CONSTMASK 0x0000000F /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET3_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET3_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET3_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET3_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+/* Constant ASID values for each entry of DTLB way set 3 (because ASID_CONSTMASK is non-zero): */
+#define XCHAL_DTLB_SET3_E0_ASID_CONST 0x01
+#define XCHAL_DTLB_SET3_E1_ASID_CONST 0x01
+/* Constant VPN values for each entry of DTLB way set 3 (because VPN_CONSTMASK is non-zero): */
+#define XCHAL_DTLB_SET3_E0_VPN_CONST 0xE0000000
+#define XCHAL_DTLB_SET3_E1_VPN_CONST 0xF0000000
+/* Constant PPN values for each entry of DTLB way set 3 (because PPN_CONSTMASK is non-zero): */
+#define XCHAL_DTLB_SET3_E0_PPN_CONST 0xF0000000
+#define XCHAL_DTLB_SET3_E1_PPN_CONST 0xF0000000
+/* Constant CA values for each entry of DTLB way set 3 (because CA_CONSTMASK is non-zero): */
+#define XCHAL_DTLB_SET3_E0_CA_CONST 0x07
+#define XCHAL_DTLB_SET3_E1_CA_CONST 0x03
+
+/* DTLB way set 4 (group of ways 7 thru 9): */
+#define XCHAL_DTLB_SET4_WAY 7 /* index of first way in this way set */
+#define XCHAL_DTLB_SET4_WAYS 3 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET4_ENTRIES_LOG2 0 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET4_ENTRIES 1 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET4_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET4_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET4_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET4_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET4_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET4_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET4_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET4_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET4_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET4_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET4_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET4_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET4_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET4_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* Indexing macros: */
+#define _XCHAL_DTLB_SET(n,_what) XCHAL_DTLB_SET ## n ## _what
+#define XCHAL_DTLB_SET(n,what) _XCHAL_DTLB_SET(n, _ ## what )
+#define _XCHAL_DTLB_SET_E(n,i,_what) XCHAL_DTLB_SET ## n ## _E ## i ## _what
+#define XCHAL_DTLB_SET_E(n,i,what) _XCHAL_DTLB_SET_E(n,i, _ ## what )
+/*
+ * Example use: XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,ENTRIES)
+ * to get the value of XCHAL_DTLB_SET<n>_ENTRIES where <n> is the first auto-refill set.
+ */
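+/*
+ * Example use of the per-entry form (illustrative only):
+ *	XCHAL_DTLB_SET_E(2,0,VPN_CONST) -> XCHAL_DTLB_SET2_E0_VPN_CONST (0xD0000000)
+ * i.e. the constant virtual address mapped by entry 0 of the static way set 2.
+ */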
+
+
+/*
+ * Determine whether we have a full MMU (with Page Table and Protection)
+ * usable for an MMU-based OS:
+ */
+#if XCHAL_HAVE_TLBS && !XCHAL_HAVE_SPANNING_WAY && XCHAL_ITLB_ARF_WAYS > 0 && XCHAL_DTLB_ARF_WAYS > 0 && XCHAL_MMU_RINGS >= 2
+# define XCHAL_HAVE_PTP_MMU 1 /* have full MMU (with page table [autorefill] and protection) */
+#else
+# define XCHAL_HAVE_PTP_MMU 0 /* don't have full MMU */
+#endif
+
+/*
+ * For full MMUs, report kernel RAM segment and kernel I/O segment static page mappings:
+ */
+#if XCHAL_HAVE_PTP_MMU
+#define XCHAL_KSEG_CACHED_VADDR 0xD0000000 /* virt.addr of kernel RAM cached static map */
+#define XCHAL_KSEG_CACHED_PADDR 0x00000000 /* phys.addr of kseg_cached */
+#define XCHAL_KSEG_CACHED_SIZE 0x08000000 /* size in bytes of kseg_cached (assumed power of 2!!!) */
+#define XCHAL_KSEG_BYPASS_VADDR 0xD8000000 /* virt.addr of kernel RAM bypass (uncached) static map */
+#define XCHAL_KSEG_BYPASS_PADDR 0x00000000 /* phys.addr of kseg_bypass */
+#define XCHAL_KSEG_BYPASS_SIZE 0x08000000 /* size in bytes of kseg_bypass (assumed power of 2!!!) */
+
+#define XCHAL_KIO_CACHED_VADDR 0xE0000000 /* virt.addr of kernel I/O cached static map */
+#define XCHAL_KIO_CACHED_PADDR 0xF0000000 /* phys.addr of kio_cached */
+#define XCHAL_KIO_CACHED_SIZE 0x10000000 /* size in bytes of kio_cached (assumed power of 2!!!) */
+#define XCHAL_KIO_BYPASS_VADDR 0xF0000000 /* virt.addr of kernel I/O bypass (uncached) static map */
+#define XCHAL_KIO_BYPASS_PADDR 0xF0000000 /* phys.addr of kio_bypass */
+#define XCHAL_KIO_BYPASS_SIZE 0x10000000 /* size in bytes of kio_bypass (assumed power of 2!!!) */
+
+#define XCHAL_SEG_MAPPABLE_VADDR 0x00000000 /* start of largest non-static-mapped virtual addr area */
+#define XCHAL_SEG_MAPPABLE_SIZE 0xD0000000 /* size in bytes of " */
+/* define XCHAL_SEG_MAPPABLE2_xxx if more areas present, sorted in order of descending size. */
+#endif
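+/*
+ * Illustrative sketch (hypothetical helpers, not part of the generated
+ * file):  with the static maps above, a cached-kseg virtual address maps
+ * to its physical address and to its uncached (bypass) alias by fixed
+ * offsets:
+ */
+#if XCHAL_HAVE_PTP_MMU && !defined(__ASSEMBLY__)
+static __inline__ unsigned long xchal_kseg_cached_to_paddr_example(unsigned long vaddr)
+{
+	/* assumes XCHAL_KSEG_CACHED_VADDR <= vaddr < XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_SIZE */
+	return vaddr - XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_PADDR;
+}
+static __inline__ unsigned long xchal_kseg_cached_to_bypass_example(unsigned long vaddr)
+{
+	return vaddr - XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_BYPASS_VADDR;
+}
+#endif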
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 4 /* number of write buffer entries */
+
+#define XCHAL_CORE_ID "linux_be" /* configuration's alphanumeric core identifier
+ (CoreID) set in the Xtensa Processor Generator */
+
+#define XCHAL_BUILD_UNIQUE_ID 0x00003256 /* software build-unique ID (22-bit) */
+
+/* These definitions describe the hardware targeted by this software: */
+#define XCHAL_HW_CONFIGID0 0xC103D1FF /* config ID reg 0 value (upper 32 of 64 bits) */
+#define XCHAL_HW_CONFIGID1 0x00803256 /* config ID reg 1 value (lower 32 of 64 bits) */
+#define XCHAL_CONFIGID0 XCHAL_HW_CONFIGID0 /* for backward compatibility only -- don't use! */
+#define XCHAL_CONFIGID1 XCHAL_HW_CONFIGID1 /* for backward compatibility only -- don't use! */
+#define XCHAL_HW_RELEASE_MAJOR 1050 /* major release of targeted hardware */
+#define XCHAL_HW_RELEASE_MINOR 1 /* minor release of targeted hardware */
+#define XCHAL_HW_RELEASE_NAME "T1050.1" /* full release name of targeted hardware */
+#define XTHAL_HW_REL_T1050 1
+#define XTHAL_HW_REL_T1050_1 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+
+
+/*
+ * Miscellaneous special register fields:
+ */
+
+
+/* DBREAKC (special register number 160): */
+#define XCHAL_DBREAKC_VALIDMASK 0xC000003F /* bits of DBREAKC that are defined */
+/* MASK field: */
+#define XCHAL_DBREAKC_MASK_BITS 6 /* number of bits in MASK field */
+#define XCHAL_DBREAKC_MASK_NUM 64 /* max number of possible values (2^bits) */
+#define XCHAL_DBREAKC_MASK_SHIFT 0 /* position of MASK bits in DBREAKC, starting from lsbit */
+#define XCHAL_DBREAKC_MASK_MASK 0x0000003F /* mask of bits in MASK field of DBREAKC */
+/* LOADBREAK field: */
+#define XCHAL_DBREAKC_LOADBREAK_BITS 1 /* number of bits in LOADBREAK field */
+#define XCHAL_DBREAKC_LOADBREAK_NUM 2 /* max number of possible values (2^bits) */
+#define XCHAL_DBREAKC_LOADBREAK_SHIFT 30 /* position of LOADBREAK bits in DBREAKC, starting from lsbit */
+#define XCHAL_DBREAKC_LOADBREAK_MASK 0x40000000 /* mask of bits in LOADBREAK field of DBREAKC */
+/* STOREBREAK field: */
+#define XCHAL_DBREAKC_STOREBREAK_BITS 1 /* number of bits in STOREBREAK field */
+#define XCHAL_DBREAKC_STOREBREAK_NUM 2 /* max number of possible values (2^bits) */
+#define XCHAL_DBREAKC_STOREBREAK_SHIFT 31 /* position of STOREBREAK bits in DBREAKC, starting from lsbit */
+#define XCHAL_DBREAKC_STOREBREAK_MASK 0x80000000 /* mask of bits in STOREBREAK field of DBREAKC */
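+/*
+ * Illustrative example (not generated):  a DBREAKC image with the
+ * STOREBREAK flag set and all six MASK bits set would be composed as
+ *	(XCHAL_DBREAKC_STOREBREAK_MASK | XCHAL_DBREAKC_MASK_MASK)
+ * The meaning of the MASK bits themselves is defined by the ISA, not here,
+ * and writing DBREAKC/DBREAKA requires WSR, which is not shown.
+ */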
+
+/* PS (special register number 230): */
+#define XCHAL_PS_VALIDMASK 0x00070FFF /* bits of PS that are defined */
+/* INTLEVEL field: */
+#define XCHAL_PS_INTLEVEL_BITS 4 /* number of bits in INTLEVEL field */
+#define XCHAL_PS_INTLEVEL_NUM 16 /* max number of possible values (2^bits) */
+#define XCHAL_PS_INTLEVEL_SHIFT 0 /* position of INTLEVEL bits in PS, starting from lsbit */
+#define XCHAL_PS_INTLEVEL_MASK 0x0000000F /* mask of bits in INTLEVEL field of PS */
+/* EXCM field: */
+#define XCHAL_PS_EXCM_BITS 1 /* number of bits in EXCM field */
+#define XCHAL_PS_EXCM_NUM 2 /* max number of possible values (2^bits) */
+#define XCHAL_PS_EXCM_SHIFT 4 /* position of EXCM bits in PS, starting from lsbit */
+#define XCHAL_PS_EXCM_MASK 0x00000010 /* mask of bits in EXCM field of PS */
+/* PROGSTACK field: */
+#define XCHAL_PS_PROGSTACK_BITS 1 /* number of bits in PROGSTACK field */
+#define XCHAL_PS_PROGSTACK_NUM 2 /* max number of possible values (2^bits) */
+#define XCHAL_PS_PROGSTACK_SHIFT 5 /* position of PROGSTACK bits in PS, starting from lsbit */
+#define XCHAL_PS_PROGSTACK_MASK 0x00000020 /* mask of bits in PROGSTACK field of PS */
+/* RING field: */
+#define XCHAL_PS_RING_BITS 2 /* number of bits in RING field */
+#define XCHAL_PS_RING_NUM 4 /* max number of possible values (2^bits) */
+#define XCHAL_PS_RING_SHIFT 6 /* position of RING bits in PS, starting from lsbit */
+#define XCHAL_PS_RING_MASK 0x000000C0 /* mask of bits in RING field of PS */
+/* OWB field: */
+#define XCHAL_PS_OWB_BITS 4 /* number of bits in OWB field */
+#define XCHAL_PS_OWB_NUM 16 /* max number of possible values (2^bits) */
+#define XCHAL_PS_OWB_SHIFT 8 /* position of OWB bits in PS, starting from lsbit */
+#define XCHAL_PS_OWB_MASK 0x00000F00 /* mask of bits in OWB field of PS */
+/* CALLINC field: */
+#define XCHAL_PS_CALLINC_BITS 2 /* number of bits in CALLINC field */
+#define XCHAL_PS_CALLINC_NUM 4 /* max number of possible values (2^bits) */
+#define XCHAL_PS_CALLINC_SHIFT 16 /* position of CALLINC bits in PS, starting from lsbit */
+#define XCHAL_PS_CALLINC_MASK 0x00030000 /* mask of bits in CALLINC field of PS */
+/* WOE field: */
+#define XCHAL_PS_WOE_BITS 1 /* number of bits in WOE field */
+#define XCHAL_PS_WOE_NUM 2 /* max number of possible values (2^bits) */
+#define XCHAL_PS_WOE_SHIFT 18 /* position of WOE bits in PS, starting from lsbit */
+#define XCHAL_PS_WOE_MASK 0x00040000 /* mask of bits in WOE field of PS */
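+/*
+ * Illustrative sketch (hypothetical helper, not generated):  extracting a
+ * field from a saved PS image uses the SHIFT/MASK pairs above, e.g.:
+ */
+#ifndef __ASSEMBLY__
+static __inline__ unsigned xchal_ps_ring_example(unsigned ps)
+{
+	return (ps & XCHAL_PS_RING_MASK) >> XCHAL_PS_RING_SHIFT;	/* current ring, 0..3 */
+}
+#endif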
+
+/* EXCCAUSE (special register number 232): */
+#define XCHAL_EXCCAUSE_VALIDMASK 0x0000003F /* bits of EXCCAUSE that are defined */
+/* EXCCAUSE field: */
+#define XCHAL_EXCCAUSE_BITS 6 /* number of bits in EXCCAUSE register */
+#define XCHAL_EXCCAUSE_NUM 64 /* max number of possible causes (2^bits) */
+#define XCHAL_EXCCAUSE_SHIFT 0 /* position of EXCCAUSE bits in register, starting from lsbit */
+#define XCHAL_EXCCAUSE_MASK 0x0000003F /* mask of bits in EXCCAUSE register */
+
+/* DEBUGCAUSE (special register number 233): */
+#define XCHAL_DEBUGCAUSE_VALIDMASK 0x0000003F /* bits of DEBUGCAUSE that are defined */
+/* ICOUNT field: */
+#define XCHAL_DEBUGCAUSE_ICOUNT_BITS 1 /* number of bits in ICOUNT field */
+#define XCHAL_DEBUGCAUSE_ICOUNT_NUM 2 /* max number of possible causes (2^bits) */
+#define XCHAL_DEBUGCAUSE_ICOUNT_SHIFT 0 /* position of ICOUNT bits in DEBUGCAUSE, starting from lsbit */
+#define XCHAL_DEBUGCAUSE_ICOUNT_MASK 0x00000001 /* mask of bits in ICOUNT field of DEBUGCAUSE */
+/* IBREAK field: */
+#define XCHAL_DEBUGCAUSE_IBREAK_BITS 1 /* number of bits in IBREAK field */
+#define XCHAL_DEBUGCAUSE_IBREAK_NUM 2 /* max number of possible causes (2^bits) */
+#define XCHAL_DEBUGCAUSE_IBREAK_SHIFT 1 /* position of IBREAK bits in DEBUGCAUSE, starting from lsbit */
+#define XCHAL_DEBUGCAUSE_IBREAK_MASK 0x00000002 /* mask of bits in IBREAK field of DEBUGCAUSE */
+/* DBREAK field: */
+#define XCHAL_DEBUGCAUSE_DBREAK_BITS 1 /* number of bits in DBREAK field */
+#define XCHAL_DEBUGCAUSE_DBREAK_NUM 2 /* max number of possible causes (2^bits) */
+#define XCHAL_DEBUGCAUSE_DBREAK_SHIFT 2 /* position of DBREAK bits in DEBUGCAUSE, starting from lsbit */
+#define XCHAL_DEBUGCAUSE_DBREAK_MASK 0x00000004 /* mask of bits in DBREAK field of DEBUGCAUSE */
+/* BREAK field: */
+#define XCHAL_DEBUGCAUSE_BREAK_BITS 1 /* number of bits in BREAK field */
+#define XCHAL_DEBUGCAUSE_BREAK_NUM 2 /* max number of possible causes (2^bits) */
+#define XCHAL_DEBUGCAUSE_BREAK_SHIFT 3 /* position of BREAK bits in DEBUGCAUSE, starting from lsbit */
+#define XCHAL_DEBUGCAUSE_BREAK_MASK 0x00000008 /* mask of bits in BREAK field of DEBUGCAUSE */
+/* BREAKN field: */
+#define XCHAL_DEBUGCAUSE_BREAKN_BITS 1 /* number of bits in BREAKN field */
+#define XCHAL_DEBUGCAUSE_BREAKN_NUM 2 /* max number of possible causes (2^bits) */
+#define XCHAL_DEBUGCAUSE_BREAKN_SHIFT 4 /* position of BREAKN bits in DEBUGCAUSE, starting from lsbit */
+#define XCHAL_DEBUGCAUSE_BREAKN_MASK 0x00000010 /* mask of bits in BREAKN field of DEBUGCAUSE */
+/* DEBUGINT field: */
+#define XCHAL_DEBUGCAUSE_DEBUGINT_BITS 1 /* number of bits in DEBUGINT field */
+#define XCHAL_DEBUGCAUSE_DEBUGINT_NUM 2 /* max number of possible causes (2^bits) */
+#define XCHAL_DEBUGCAUSE_DEBUGINT_SHIFT 5 /* position of DEBUGINT bits in DEBUGCAUSE, starting from lsbit */
+#define XCHAL_DEBUGCAUSE_DEBUGINT_MASK 0x00000020 /* mask of bits in DEBUGINT field of DEBUGCAUSE */
+
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_DENSITY 1 /* 1 if density option configured, 0 otherwise */
+#define XCHAL_HAVE_LOOPS 1 /* 1 if zero-overhead loops option configured, 0 otherwise */
+/* Misc instructions: */
+#define XCHAL_HAVE_NSA 0 /* 1 if NSA/NSAU instructions option configured, 0 otherwise */
+#define XCHAL_HAVE_MINMAX 0 /* 1 if MIN/MAX instructions option configured, 0 otherwise */
+#define XCHAL_HAVE_SEXT 0 /* 1 if sign-extend instruction option configured, 0 otherwise */
+#define XCHAL_HAVE_CLAMPS 0 /* 1 if CLAMPS instruction option configured, 0 otherwise */
+#define XCHAL_HAVE_MAC16 0 /* 1 if MAC16 option configured, 0 otherwise */
+#define XCHAL_HAVE_MUL16 0 /* 1 if 16-bit integer multiply option configured, 0 otherwise */
+/*#define XCHAL_HAVE_POPC 0*/ /* 1 if POPC instruction option configured, 0 otherwise */
+/*#define XCHAL_HAVE_CRC 0*/ /* 1 if CRC instruction option configured, 0 otherwise */
+
+#define XCHAL_HAVE_SPECULATION 0 /* 1 if speculation option configured, 0 otherwise */
+/*#define XCHAL_HAVE_MP_SYNC 0*/ /* 1 if multiprocessor sync. option configured, 0 otherwise */
+#define XCHAL_HAVE_PRID 0 /* 1 if processor ID register configured, 0 otherwise */
+
+#define XCHAL_NUM_MISC_REGS 2 /* number of miscellaneous registers (0..4) */
+
+/* These relate a bit more to TIE: */
+#define XCHAL_HAVE_BOOLEANS 0 /* 1 if booleans option configured, 0 otherwise */
+#define XCHAL_HAVE_MUL32 0 /* 1 if 32-bit integer multiply option configured, 0 otherwise */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* 1 if MUL32 option includes MULUH and MULSH, 0 otherwise */
+#define XCHAL_HAVE_FP 0 /* 1 if floating point option configured, 0 otherwise */
+
+
+/*----------------------------------------------------------------------
+ DERIVED
+ ----------------------------------------------------------------------*/
+
+#if XCHAL_HAVE_BE
+#define XCHAL_INST_ILLN 0xD60F /* 2-byte illegal instruction, msb-first */
+#define XCHAL_INST_ILLN_BYTE0 0xD6 /* 2-byte illegal instruction, 1st byte */
+#define XCHAL_INST_ILLN_BYTE1 0x0F /* 2-byte illegal instruction, 2nd byte */
+#else
+#define XCHAL_INST_ILLN 0xF06D /* 2-byte illegal instruction, lsb-first */
+#define XCHAL_INST_ILLN_BYTE0 0x6D /* 2-byte illegal instruction, 1st byte */
+#define XCHAL_INST_ILLN_BYTE1 0xF0 /* 2-byte illegal instruction, 2nd byte */
+#endif
+/* Belongs in xtensa/hal.h: */
+#define XTHAL_INST_ILL 0x000000 /* 3-byte illegal instruction */
+
+
+/*
+ * Because information as to exactly which hardware release is targeted
+ * by a given software build is not always available, compile-time HAL
+ * Hardware-Release "_AT" macros are fuzzy (return 0, 1, or XTHAL_MAYBE):
+ */
+#ifndef XCHAL_HW_RELEASE_MAJOR
+# define XCHAL_HW_CONFIGID_RELIABLE 0
+#endif
+#if XCHAL_HW_CONFIGID_RELIABLE
+# define XCHAL_HW_RELEASE_AT_OR_BELOW(major,minor) (XTHAL_REL_LE( XCHAL_HW_RELEASE_MAJOR,XCHAL_HW_RELEASE_MINOR, major,minor ) ? 1 : 0)
+# define XCHAL_HW_RELEASE_AT_OR_ABOVE(major,minor) (XTHAL_REL_GE( XCHAL_HW_RELEASE_MAJOR,XCHAL_HW_RELEASE_MINOR, major,minor ) ? 1 : 0)
+# define XCHAL_HW_RELEASE_AT(major,minor) (XTHAL_REL_EQ( XCHAL_HW_RELEASE_MAJOR,XCHAL_HW_RELEASE_MINOR, major,minor ) ? 1 : 0)
+# define XCHAL_HW_RELEASE_MAJOR_AT(major) ((XCHAL_HW_RELEASE_MAJOR == (major)) ? 1 : 0)
+#else
+# define XCHAL_HW_RELEASE_AT_OR_BELOW(major,minor) ( ((major) < 1040 && XCHAL_HAVE_XEA2) ? 0 \
+ : ((major) > 1050 && XCHAL_HAVE_XEA1) ? 1 \
+ : XTHAL_MAYBE )
+# define XCHAL_HW_RELEASE_AT_OR_ABOVE(major,minor) ( ((major) >= 2000 && XCHAL_HAVE_XEA1) ? 0 \
+ : (XTHAL_REL_LE(major,minor, 1040,0) && XCHAL_HAVE_XEA2) ? 1 \
+ : XTHAL_MAYBE )
+# define XCHAL_HW_RELEASE_AT(major,minor) ( (((major) < 1040 && XCHAL_HAVE_XEA2) || \
+ ((major) >= 2000 && XCHAL_HAVE_XEA1)) ? 0 : XTHAL_MAYBE)
+# define XCHAL_HW_RELEASE_MAJOR_AT(major) XCHAL_HW_RELEASE_AT(major,0)
+#endif
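+/*
+ * Example use (illustrative):  because these macros may evaluate to
+ * XTHAL_MAYBE when the targeted release is unknown, code that needs a
+ * definite answer should compare explicitly, e.g.
+ *	#if XCHAL_HW_RELEASE_AT_OR_ABOVE(1050,1) == 1
+ * rather than testing the plain truth value.
+ */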
+
+/*
+ * Specific errata:
+ */
+
+/*
+ * Erratum T1020.H13, T1030.H7, T1040.H10, T1050.H4 (fixed in T1040.3 and T1050.1;
+ * relevant only in XEA1, kernel-vector mode, level-one interrupts and overflows enabled):
+ */
+#define XCHAL_MAYHAVE_ERRATUM_XEA1KWIN (XCHAL_HAVE_XEA1 && \
+ (XCHAL_HW_RELEASE_AT_OR_BELOW(1040,2) != 0 \
+ || XCHAL_HW_RELEASE_AT(1050,0)))
+
+
+
+#endif /*XTENSA_CONFIG_CORE_H*/
+
diff --git a/include/asm-xtensa/xtensa/config-linux_be/defs.h b/include/asm-xtensa/xtensa/config-linux_be/defs.h
new file mode 100644
index 00000000000..f7c58b27337
--- /dev/null
+++ b/include/asm-xtensa/xtensa/config-linux_be/defs.h
@@ -0,0 +1,270 @@
+/* Definitions for Xtensa instructions, types, and prototypes. */
+
+/*
+ * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ */
+
+/* Do not modify. This is automatically generated.*/
+
+#ifndef _XTENSA_BASE_HEADER
+#define _XTENSA_BASE_HEADER
+
+#ifdef __XTENSA__
+#if defined(__GNUC__) && !defined(__XCC__)
+
+#define L8UI_ASM(arr, ars, imm) { \
+ __asm__ volatile("l8ui %0, %1, %2" : "=a" (arr) : "a" (ars) , "i" (imm)); \
+}
+
+#define XT_L8UI(ars, imm) \
+({ \
+ unsigned char _arr; \
+ const unsigned char *_ars = ars; \
+ L8UI_ASM(_arr, _ars, imm); \
+ _arr; \
+})
+
+#define L16UI_ASM(arr, ars, imm) { \
+ __asm__ volatile("l16ui %0, %1, %2" : "=a" (arr) : "a" (ars) , "i" (imm)); \
+}
+
+#define XT_L16UI(ars, imm) \
+({ \
+ unsigned short _arr; \
+ const unsigned short *_ars = ars; \
+ L16UI_ASM(_arr, _ars, imm); \
+ _arr; \
+})
+
+#define L16SI_ASM(arr, ars, imm) {\
+ __asm__ volatile("l16si %0, %1, %2" : "=a" (arr) : "a" (ars) , "i" (imm)); \
+}
+
+#define XT_L16SI(ars, imm) \
+({ \
+ signed short _arr; \
+ const signed short *_ars = ars; \
+ L16SI_ASM(_arr, _ars, imm); \
+ _arr; \
+})
+
+#define L32I_ASM(arr, ars, imm) { \
+ __asm__ volatile("l32i %0, %1, %2" : "=a" (arr) : "a" (ars) , "i" (imm)); \
+}
+
+#define XT_L32I(ars, imm) \
+({ \
+ unsigned _arr; \
+ const unsigned *_ars = ars; \
+ L32I_ASM(_arr, _ars, imm); \
+ _arr; \
+})
+
+#define S8I_ASM(arr, ars, imm) {\
+ __asm__ volatile("s8i %0, %1, %2" : : "a" (arr), "a" (ars) , "i" (imm) : "memory" ); \
+}
+
+#define XT_S8I(arr, ars, imm) \
+({ \
+ signed char _arr = arr; \
+ const signed char *_ars = ars; \
+ S8I_ASM(_arr, _ars, imm); \
+})
+
+#define S16I_ASM(arr, ars, imm) {\
+ __asm__ volatile("s16i %0, %1, %2" : : "a" (arr), "a" (ars) , "i" (imm) : "memory" ); \
+}
+
+#define XT_S16I(arr, ars, imm) \
+({ \
+ signed short _arr = arr; \
+ const signed short *_ars = ars; \
+ S16I_ASM(_arr, _ars, imm); \
+})
+
+#define S32I_ASM(arr, ars, imm) { \
+ __asm__ volatile("s32i %0, %1, %2" : : "a" (arr), "a" (ars) , "i" (imm) : "memory" ); \
+}
+
+#define XT_S32I(arr, ars, imm) \
+({ \
+ signed int _arr = arr; \
+ const signed int *_ars = ars; \
+ S32I_ASM(_arr, _ars, imm); \
+})
+
+#define ADDI_ASM(art, ars, imm) {\
+ __asm__ ("addi %0, %1, %2" : "=a" (art) : "a" (ars), "i" (imm)); \
+}
+
+#define XT_ADDI(ars, imm) \
+({ \
+ unsigned _art; \
+ unsigned _ars = ars; \
+ ADDI_ASM(_art, _ars, imm); \
+ _art; \
+})
+
+#define ABS_ASM(arr, art) {\
+ __asm__ ("abs %0, %1" : "=a" (arr) : "a" (art)); \
+}
+
+#define XT_ABS(art) \
+({ \
+ unsigned _arr; \
+ signed _art = art; \
+ ABS_ASM(_arr, _art); \
+ _arr; \
+})
+
+/* Note: In the following macros that reference SAR, the magic "state"
+ register is used to capture the dependency on SAR. This is because
+ SAR is a 5-bit register and thus there are no C types that can be
+ used to represent it. It doesn't appear that the SAR register is
+ even relevant to GCC, but it is marked as "clobbered" just in
+ case. */
+
+#define SRC_ASM(arr, ars, art) {\
+ register int _xt_sar __asm__ ("state"); \
+ __asm__ ("src %0, %1, %2" \
+ : "=a" (arr) : "a" (ars), "a" (art), "t" (_xt_sar)); \
+}
+
+#define XT_SRC(ars, art) \
+({ \
+ unsigned _arr; \
+ unsigned _ars = ars; \
+ unsigned _art = art; \
+ SRC_ASM(_arr, _ars, _art); \
+ _arr; \
+})
+
+#define SSR_ASM(ars) {\
+ register int _xt_sar __asm__ ("state"); \
+ __asm__ ("ssr %1" : "=t" (_xt_sar) : "a" (ars) : "sar"); \
+}
+
+#define XT_SSR(ars) \
+({ \
+ unsigned _ars = ars; \
+ SSR_ASM(_ars); \
+})
+
+#define SSL_ASM(ars) {\
+ register int _xt_sar __asm__ ("state"); \
+ __asm__ ("ssl %1" : "=t" (_xt_sar) : "a" (ars) : "sar"); \
+}
+
+#define XT_SSL(ars) \
+({ \
+ unsigned _ars = ars; \
+ SSL_ASM(_ars); \
+})
+
+#define SSA8B_ASM(ars) {\
+ register int _xt_sar __asm__ ("state"); \
+ __asm__ ("ssa8b %1" : "=t" (_xt_sar) : "a" (ars) : "sar"); \
+}
+
+#define XT_SSA8B(ars) \
+({ \
+ unsigned _ars = ars; \
+ SSA8B_ASM(_ars); \
+})
+
+#define SSA8L_ASM(ars) {\
+ register int _xt_sar __asm__ ("state"); \
+ __asm__ ("ssa8l %1" : "=t" (_xt_sar) : "a" (ars) : "sar"); \
+}
+
+#define XT_SSA8L(ars) \
+({ \
+ unsigned _ars = ars; \
+ SSA8L_ASM(_ars); \
+})
+
+#define SSAI_ASM(imm) {\
+ register int _xt_sar __asm__ ("state"); \
+ __asm__ ("ssai %1" : "=t" (_xt_sar) : "i" (imm) : "sar"); \
+}
+
+#define XT_SSAI(imm) \
+({ \
+ SSAI_ASM(imm); \
+})
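+
+/* Illustrative sketch (hypothetical helper, not part of the generated
+   definitions):  SSR + SRC together implement a 64-bit funnel shift, e.g.
+   the low 32 bits of ((hi:lo) >> n) for 0 <= n <= 31: */
+
+static __inline__ unsigned xt_shift_right_64_example(unsigned hi, unsigned lo, unsigned n)
+{
+	XT_SSR(n);		/* SAR = n (right-shift amount) */
+	return XT_SRC(hi, lo);	/* (hi:lo) >> SAR, low 32 bits */
+}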
+
+
+
+
+
+
+
+
+#endif /* __GNUC__ && !__XCC__ */
+
+#ifdef __XCC__
+
+/* Core load/store instructions */
+extern unsigned char _TIE_L8UI(const unsigned char * ars, immediate imm);
+extern unsigned short _TIE_L16UI(const unsigned short * ars, immediate imm);
+extern signed short _TIE_L16SI(const signed short * ars, immediate imm);
+extern unsigned _TIE_L32I(const unsigned * ars, immediate imm);
+extern void _TIE_S8I(unsigned char arr, unsigned char * ars, immediate imm);
+extern void _TIE_S16I(unsigned short arr, unsigned short * ars, immediate imm);
+extern void _TIE_S32I(unsigned arr, unsigned * ars, immediate imm);
+
+#define XT_L8UI _TIE_L8UI
+#define XT_L16UI _TIE_L16UI
+#define XT_L16SI _TIE_L16SI
+#define XT_L32I _TIE_L32I
+#define XT_S8I _TIE_S8I
+#define XT_S16I _TIE_S16I
+#define XT_S32I _TIE_S32I
+
+/* Add-immediate instruction */
+extern unsigned _TIE_ADDI(unsigned ars, immediate imm);
+#define XT_ADDI _TIE_ADDI
+
+/* Absolute value instruction */
+extern unsigned _TIE_ABS(int art);
+#define XT_ABS _TIE_ABS
+
+/* funnel shift instructions */
+extern unsigned _TIE_SRC(unsigned ars, unsigned art);
+#define XT_SRC _TIE_SRC
+extern void _TIE_SSR(unsigned ars);
+#define XT_SSR _TIE_SSR
+extern void _TIE_SSL(unsigned ars);
+#define XT_SSL _TIE_SSL
+extern void _TIE_SSA8B(unsigned ars);
+#define XT_SSA8B _TIE_SSA8B
+extern void _TIE_SSA8L(unsigned ars);
+#define XT_SSA8L _TIE_SSA8L
+extern void _TIE_SSAI(immediate imm);
+#define XT_SSAI _TIE_SSAI
+
+
+#endif /* __XCC__ */
+
+#endif /* __XTENSA__ */
+#endif /* !_XTENSA_BASE_HEADER */
diff --git a/include/asm-xtensa/xtensa/config-linux_be/specreg.h b/include/asm-xtensa/xtensa/config-linux_be/specreg.h
new file mode 100644
index 00000000000..fa4106aa9a0
--- /dev/null
+++ b/include/asm-xtensa/xtensa/config-linux_be/specreg.h
@@ -0,0 +1,99 @@
+/*
+ * Xtensa Special Register symbolic names
+ */
+
+/* $Id: specreg.h,v 1.2 2003/03/07 19:15:18 joetaylor Exp $ */
+
+/*
+ * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ */
+
+#ifndef XTENSA_SPECREG_H
+#define XTENSA_SPECREG_H
+
+/* Include these special register bitfield definitions, for historical reasons: */
+#include <xtensa/corebits.h>
+
+
+/* Special registers: */
+#define LBEG 0
+#define LEND 1
+#define LCOUNT 2
+#define SAR 3
+#define WINDOWBASE 72
+#define WINDOWSTART 73
+#define PTEVADDR 83
+#define RASID 90
+#define ITLBCFG 91
+#define DTLBCFG 92
+#define IBREAKENABLE 96
+#define DDR 104
+#define IBREAKA_0 128
+#define IBREAKA_1 129
+#define DBREAKA_0 144
+#define DBREAKA_1 145
+#define DBREAKC_0 160
+#define DBREAKC_1 161
+#define EPC_1 177
+#define EPC_2 178
+#define EPC_3 179
+#define EPC_4 180
+#define DEPC 192
+#define EPS_2 194
+#define EPS_3 195
+#define EPS_4 196
+#define EXCSAVE_1 209
+#define EXCSAVE_2 210
+#define EXCSAVE_3 211
+#define EXCSAVE_4 212
+#define INTERRUPT 226
+#define INTENABLE 228
+#define PS 230
+#define EXCCAUSE 232
+#define DEBUGCAUSE 233
+#define CCOUNT 234
+#define ICOUNT 236
+#define ICOUNTLEVEL 237
+#define EXCVADDR 238
+#define CCOMPARE_0 240
+#define CCOMPARE_1 241
+#define CCOMPARE_2 242
+#define MISC_REG_0 244
+#define MISC_REG_1 245
+
+/* Special cases (bases of special register series): */
+#define IBREAKA 128
+#define DBREAKA 144
+#define DBREAKC 160
+#define EPC 176
+#define EPS 192
+#define EXCSAVE 208
+#define CCOMPARE 240
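+
+/*
+ * For illustration (derived from the values above, not generated output):
+ * each numbered register of a series is simply the series base plus the
+ * index, eg.:
+ *
+ *	EPC + 1       == EPC_1       == 177
+ *	CCOMPARE + 2  == CCOMPARE_2  == 242
+ */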
+
+/* Special names for read-only and write-only interrupt registers: */
+#define INTREAD 226
+#define INTSET 226
+#define INTCLEAR 227
+
+#endif /* XTENSA_SPECREG_H */
+
diff --git a/include/asm-xtensa/xtensa/config-linux_be/system.h b/include/asm-xtensa/xtensa/config-linux_be/system.h
new file mode 100644
index 00000000000..cf9d4d308e3
--- /dev/null
+++ b/include/asm-xtensa/xtensa/config-linux_be/system.h
@@ -0,0 +1,198 @@
+/*
+ * xtensa/config/system.h -- HAL definitions that are dependent on SYSTEM configuration
+ *
+ * NOTE: The location and contents of this file are highly subject to change.
+ *
+ * Source for configuration-independent binaries (which link in a
+ * configuration-specific HAL library) must NEVER include this file.
+ * The HAL itself has historically included this file in some instances,
+ * but this is not appropriate either, because the HAL is meant to be
+ * core-specific but system independent.
+ */
+
+/*
+ * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ */
+
+
+#ifndef XTENSA_CONFIG_SYSTEM_H
+#define XTENSA_CONFIG_SYSTEM_H
+
+/*#include <xtensa/hal.h>*/
+
+
+
+/*----------------------------------------------------------------------
+ DEVICE ADDRESSES
+ ----------------------------------------------------------------------*/
+
+/*
+ * Strange place to find these, but the configuration GUI
+ * allows moving these around to account for various core
+ * configurations. Specific boards (and their BSP software)
+ * will have specific meanings for these components.
+ */
+
+/* I/O Block areas: */
+#define XSHAL_IOBLOCK_CACHED_VADDR 0xE0000000
+#define XSHAL_IOBLOCK_CACHED_PADDR 0xF0000000
+#define XSHAL_IOBLOCK_CACHED_SIZE 0x0E000000
+
+#define XSHAL_IOBLOCK_BYPASS_VADDR 0xF0000000
+#define XSHAL_IOBLOCK_BYPASS_PADDR 0xF0000000
+#define XSHAL_IOBLOCK_BYPASS_SIZE 0x0E000000
+
+/* System ROM: */
+#define XSHAL_ROM_VADDR 0xEE000000
+#define XSHAL_ROM_PADDR 0xFE000000
+#define XSHAL_ROM_SIZE 0x00400000
+/* Largest available area (free of vectors): */
+#define XSHAL_ROM_AVAIL_VADDR 0xEE00052C
+#define XSHAL_ROM_AVAIL_VSIZE 0x003FFAD4
+
+/* System RAM: */
+#define XSHAL_RAM_VADDR 0xD0000000
+#define XSHAL_RAM_PADDR 0x00000000
+#define XSHAL_RAM_VSIZE 0x08000000
+#define XSHAL_RAM_PSIZE 0x10000000
+#define XSHAL_RAM_SIZE XSHAL_RAM_PSIZE
+/* Largest available area (free of vectors): */
+#define XSHAL_RAM_AVAIL_VADDR 0xD0000370
+#define XSHAL_RAM_AVAIL_VSIZE 0x07FFFC90
+
+/*
+ * Shadow system RAM (same device as system RAM, at different address).
+ * (Emulation boards need this for the SONIC Ethernet driver
+ * when data caches are configured for writeback mode.)
+ * NOTE: on full MMU configs, this points to the BYPASS virtual address
+ * of system RAM, ie. is the same as XSHAL_RAM_* except that virtual
+ * addresses are viewed through the BYPASS static map rather than
+ * the CACHED static map.
+ */
+#define XSHAL_RAM_BYPASS_VADDR 0xD8000000
+#define XSHAL_RAM_BYPASS_PADDR 0x00000000
+#define XSHAL_RAM_BYPASS_PSIZE 0x08000000
+
+/* Alternate system RAM (different device than system RAM): */
+#define XSHAL_ALTRAM_VADDR 0xCEE00000
+#define XSHAL_ALTRAM_PADDR 0xC0000000
+#define XSHAL_ALTRAM_SIZE 0x00200000
+
+
+/*----------------------------------------------------------------------
+ * DEVICE-ADDRESS DEPENDENT...
+ *
+ * Values written to CACHEATTR special register (or its equivalent)
+ * to enable and disable caches in various modes.
+ *----------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------
+ BACKWARD COMPATIBILITY ...
+ ----------------------------------------------------------------------*/
+
+/*
+ * NOTE: the following two macros are DEPRECATED.  Use the later,
+ * board-specific macros instead, which are specially tuned for the
+ * particular target environments' memory maps.
+ */
+#define XSHAL_CACHEATTR_BYPASS XSHAL_XT2000_CACHEATTR_BYPASS /* disable caches in bypass mode */
+#define XSHAL_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_DEFAULT /* default setting to enable caches (no writeback!) */
+
+/*----------------------------------------------------------------------
+ ISS (Instruction Set Simulator) SPECIFIC ...
+ ----------------------------------------------------------------------*/
+
+#define XSHAL_ISS_CACHEATTR_WRITEBACK 0x1122222F /* enable caches in write-back mode */
+#define XSHAL_ISS_CACHEATTR_WRITEALLOC 0x1122222F /* enable caches in write-allocate mode */
+#define XSHAL_ISS_CACHEATTR_WRITETHRU 0x1122222F /* enable caches in write-through mode */
+#define XSHAL_ISS_CACHEATTR_BYPASS 0x2222222F /* disable caches in bypass mode */
+#define XSHAL_ISS_CACHEATTR_DEFAULT XSHAL_ISS_CACHEATTR_WRITEBACK /* default setting to enable caches */
+
+/* For Coware only: */
+#define XSHAL_COWARE_CACHEATTR_WRITEBACK 0x11222222 /* enable caches in write-back mode */
+#define XSHAL_COWARE_CACHEATTR_WRITEALLOC 0x11222222 /* enable caches in write-allocate mode */
+#define XSHAL_COWARE_CACHEATTR_WRITETHRU 0x11222222 /* enable caches in write-through mode */
+#define XSHAL_COWARE_CACHEATTR_BYPASS 0x22222222 /* disable caches in bypass mode */
+#define XSHAL_COWARE_CACHEATTR_DEFAULT XSHAL_COWARE_CACHEATTR_WRITEBACK /* default setting to enable caches */
+
+/* For BFM and other purposes: */
+#define XSHAL_ALLVALID_CACHEATTR_WRITEBACK 0x11222222 /* enable caches without any invalid regions */
+#define XSHAL_ALLVALID_CACHEATTR_DEFAULT XSHAL_ALLVALID_CACHEATTR_WRITEBACK /* default setting for caches without any invalid regions */
+
+#define XSHAL_ISS_PIPE_REGIONS 0
+#define XSHAL_ISS_SDRAM_REGIONS 0
+
+
+/*----------------------------------------------------------------------
+ XT2000 BOARD SPECIFIC ...
+ ----------------------------------------------------------------------*/
+
+#define XSHAL_XT2000_CACHEATTR_WRITEBACK 0x22FFFFFF /* enable caches in write-back mode */
+#define XSHAL_XT2000_CACHEATTR_WRITEALLOC 0x22FFFFFF /* enable caches in write-allocate mode */
+#define XSHAL_XT2000_CACHEATTR_WRITETHRU 0x22FFFFFF /* enable caches in write-through mode */
+#define XSHAL_XT2000_CACHEATTR_BYPASS 0x22FFFFFF /* disable caches in bypass mode */
+#define XSHAL_XT2000_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_WRITEBACK /* default setting to enable caches */
+
+#define XSHAL_XT2000_PIPE_REGIONS 0x00001000 /* BusInt pipeline regions */
+#define XSHAL_XT2000_SDRAM_REGIONS 0x00000005 /* BusInt SDRAM regions */
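+
+/*
+ * Illustrative sketch (an assumption about the encoding, not part of the
+ * generated configuration): a CACHEATTR value packs eight 4-bit fields,
+ * one per 512MB region of the address space, with field n describing
+ * addresses starting at n * 512MB.  A hypothetical helper to pick out
+ * one field from the values above might look like:
+ */
+#if 0
+static inline unsigned xshal_cacheattr_of(unsigned cacheattr, unsigned long vaddr)
+{
+	unsigned region = (unsigned)(vaddr >> 29);	/* 512MB regions 0..7 */
+	return (cacheattr >> (region * 4)) & 0xF;	/* 4-bit attribute field */
+}
+#endif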
+
+
+/*----------------------------------------------------------------------
+ VECTOR SIZES
+ ----------------------------------------------------------------------*/
+
+/*
+ * Sizes allocated to vectors by the system (memory map) configuration.
+ * These sizes are constrained by core configuration (eg. one vector's
+ * code cannot overflow into another vector) but are dependent on the
+ * system or board (or LSP) memory map configuration.
+ *
+ * Whether or not each vector happens to be in a system ROM is also
+ * a system configuration matter; it is sometimes useful, so it is included here as well:
+ */
+#define XSHAL_RESET_VECTOR_SIZE 0x000004E0
+#define XSHAL_RESET_VECTOR_ISROM 1
+#define XSHAL_USER_VECTOR_SIZE 0x0000001C
+#define XSHAL_USER_VECTOR_ISROM 0
+#define XSHAL_PROGRAMEXC_VECTOR_SIZE XSHAL_USER_VECTOR_SIZE /* for backward compatibility */
+#define XSHAL_USEREXC_VECTOR_SIZE XSHAL_USER_VECTOR_SIZE /* for backward compatibility */
+#define XSHAL_KERNEL_VECTOR_SIZE 0x0000001C
+#define XSHAL_KERNEL_VECTOR_ISROM 0
+#define XSHAL_STACKEDEXC_VECTOR_SIZE XSHAL_KERNEL_VECTOR_SIZE /* for backward compatibility */
+#define XSHAL_KERNELEXC_VECTOR_SIZE XSHAL_KERNEL_VECTOR_SIZE /* for backward compatibility */
+#define XSHAL_DOUBLEEXC_VECTOR_SIZE 0x000000E0
+#define XSHAL_DOUBLEEXC_VECTOR_ISROM 0
+#define XSHAL_WINDOW_VECTORS_SIZE 0x00000180
+#define XSHAL_WINDOW_VECTORS_ISROM 0
+#define XSHAL_INTLEVEL2_VECTOR_SIZE 0x0000000C
+#define XSHAL_INTLEVEL2_VECTOR_ISROM 0
+#define XSHAL_INTLEVEL3_VECTOR_SIZE 0x0000000C
+#define XSHAL_INTLEVEL3_VECTOR_ISROM 0
+#define XSHAL_INTLEVEL4_VECTOR_SIZE 0x0000000C
+#define XSHAL_INTLEVEL4_VECTOR_ISROM 1
+#define XSHAL_DEBUG_VECTOR_SIZE XSHAL_INTLEVEL4_VECTOR_SIZE
+#define XSHAL_DEBUG_VECTOR_ISROM XSHAL_INTLEVEL4_VECTOR_ISROM
+
+
+#endif /*XTENSA_CONFIG_SYSTEM_H*/
+
diff --git a/include/asm-xtensa/xtensa/config-linux_be/tie.h b/include/asm-xtensa/xtensa/config-linux_be/tie.h
new file mode 100644
index 00000000000..3c2e514602f
--- /dev/null
+++ b/include/asm-xtensa/xtensa/config-linux_be/tie.h
@@ -0,0 +1,275 @@
+/*
+ * xtensa/config/tie.h -- HAL definitions that are dependent on CORE and TIE configuration
+ *
+ * This header file is sometimes referred to as the "compile-time HAL" or CHAL.
+ * It was generated for a specific Xtensa processor configuration,
+ * and furthermore for a specific set of TIE source files that extend
+ * basic core functionality.
+ *
+ * Source for configuration-independent binaries (which link in a
+ * configuration-specific HAL library) must NEVER include this file.
+ * It is perfectly normal, however, for the HAL source itself to include this file.
+ */
+
+/*
+ * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ */
+
+
+#ifndef XTENSA_CONFIG_TIE_H
+#define XTENSA_CONFIG_TIE_H
+
+#include <xtensa/hal.h>
+
+
+/*----------------------------------------------------------------------
+ GENERAL
+ ----------------------------------------------------------------------*/
+
+/*
+ * Separators for macros that expand into arrays.
+ * These can be predefined by files that #include this one,
+ * when different separators are required.
+ */
+/* Element separator for macros that expand into 1-dimensional arrays: */
+#ifndef XCHAL_SEP
+#define XCHAL_SEP ,
+#endif
+/* Array separator for macros that expand into 2-dimensional arrays: */
+#ifndef XCHAL_SEP2
+#define XCHAL_SEP2 },{
+#endif
+
+
+
+
+
+
+/*----------------------------------------------------------------------
+ COPROCESSORS and EXTRA STATE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_CP_NUM 0 /* number of coprocessors */
+#define XCHAL_CP_MAX 0 /* max coprocessor id plus one (0 if none) */
+#define XCHAL_CP_MASK 0x00 /* bitmask of coprocessors by id */
+
+/* Space for coprocessors' state save areas: */
+#define XCHAL_CP0_SA_SIZE 0
+#define XCHAL_CP1_SA_SIZE 0
+#define XCHAL_CP2_SA_SIZE 0
+#define XCHAL_CP3_SA_SIZE 0
+#define XCHAL_CP4_SA_SIZE 0
+#define XCHAL_CP5_SA_SIZE 0
+#define XCHAL_CP6_SA_SIZE 0
+#define XCHAL_CP7_SA_SIZE 0
+/* Minimum required alignments of CP state save areas: */
+#define XCHAL_CP0_SA_ALIGN 1
+#define XCHAL_CP1_SA_ALIGN 1
+#define XCHAL_CP2_SA_ALIGN 1
+#define XCHAL_CP3_SA_ALIGN 1
+#define XCHAL_CP4_SA_ALIGN 1
+#define XCHAL_CP5_SA_ALIGN 1
+#define XCHAL_CP6_SA_ALIGN 1
+#define XCHAL_CP7_SA_ALIGN 1
+
+/* Indexing macros: */
+#define _XCHAL_CP_SA_SIZE(n) XCHAL_CP ## n ## _SA_SIZE
+#define XCHAL_CP_SA_SIZE(n) _XCHAL_CP_SA_SIZE(n) /* n = 0 .. 7 */
+#define _XCHAL_CP_SA_ALIGN(n) XCHAL_CP ## n ## _SA_ALIGN
+#define XCHAL_CP_SA_ALIGN(n) _XCHAL_CP_SA_ALIGN(n) /* n = 0 .. 7 */
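+
+/*
+ * Illustrative expansion (derived from the definitions above): the
+ * two-level macros let <n> itself be a macro, because both levels are
+ * expanded before token pasting, eg.:
+ *
+ *	XCHAL_CP_SA_SIZE(0)   ->  XCHAL_CP0_SA_SIZE   ->  0
+ *	XCHAL_CP_SA_ALIGN(7)  ->  XCHAL_CP7_SA_ALIGN  ->  1
+ */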
+
+
+/* Space for "extra" state (user special registers and non-cp TIE) save area: */
+#define XCHAL_EXTRA_SA_SIZE 0
+#define XCHAL_EXTRA_SA_ALIGN 1
+
+/* Total save area size (extra + all coprocessors) */
+/* (not useful until xthal_{save,restore}_all_extra() is implemented, */
+/* but included for Tor2 beta; doesn't account for alignment!): */
+#define XCHAL_CPEXTRA_SA_SIZE_TOR2 0 /* Tor2Beta temporary definition -- do not use */
+
+/* Combined required alignment for all CP and EXTRA state save areas */
+/* (does not include required alignment for any base config registers): */
+#define XCHAL_CPEXTRA_SA_ALIGN 1
+
+/* ... */
+
+
+#ifdef _ASMLANGUAGE
+/*
+ * Assembly-language specific definitions (assembly macros, etc.).
+ */
+#include <xtensa/config/specreg.h>
+
+/********************
+ * Macros to save and restore the non-coprocessor TIE portion of EXTRA state.
+ */
+
+/* (none) */
+
+
+/********************
+ * Macros to create functions that save and restore all EXTRA (non-coprocessor) state
+ * (does not include zero-overhead loop registers and non-optional registers).
+ */
+
+ /*
+ * Macro that expands to the body of a function that
+ * stores the extra (non-coprocessor) optional/custom state.
+ * Entry: a2 = ptr to save area in which to save extra state
+ * Exit: any register a2-a15 (?) may have been clobbered.
+ */
+ .macro xchal_extra_store_funcbody
+ .endm
+
+
+ /*
+ * Macro that expands to the body of a function that
+ * loads the extra (non-coprocessor) optional/custom state.
+ * Entry: a2 = ptr to save area from which to restore extra state
+ * Exit: any register a2-a15 (?) may have been clobbered.
+ */
+ .macro xchal_extra_load_funcbody
+ .endm
+
+
+/********************
+ * Macros to save and restore the state of each TIE coprocessor.
+ */
+
+
+
+/********************
+ * Macros to create functions that save and restore the state of *any* TIE coprocessor.
+ */
+
+ /*
+ * Macro that expands to the body of a function
+ * that stores the selected coprocessor's state (registers etc).
+ * Entry: a2 = ptr to save area in which to save cp state
+ * a3 = coprocessor number
+ * Exit: any register a2-a15 (?) may have been clobbered.
+ */
+ .macro xchal_cpi_store_funcbody
+ .endm
+
+
+ /*
+ * Macro that expands to the body of a function
+ * that loads the selected coprocessor's state (registers etc).
+ * Entry: a2 = ptr to save area from which to restore cp state
+ * a3 = coprocessor number
+ * Exit: any register a2-a15 (?) may have been clobbered.
+ */
+ .macro xchal_cpi_load_funcbody
+ .endm
+
+#endif /*_ASMLANGUAGE*/
+
+
+/*
+ * Contents of save areas in terms of libdb register numbers.
+ * NOTE: CONTENTS_LIBDB_{UREG,REGF} macros are not defined in this file;
+ * it is up to the user of this header file to define these macros
+ * usefully before each expansion of the CONTENTS_LIBDB macros.
+ * (Fields rsv[123] are reserved for future additions; they are currently
+ * set to zero but may be set to some useful values in the future.)
+ *
+ * CONTENTS_LIBDB_SREG(libdbnum, offset, size, align, rsv1, name, sregnum, bitmask, rsv2, rsv3)
+ * CONTENTS_LIBDB_UREG(libdbnum, offset, size, align, rsv1, name, uregnum, bitmask, rsv2, rsv3)
+ * CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, numentries, contentsize, regname_base, regfile_name, rsv2, rsv3)
+ */
+
+#define XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_EXTRA_SA_CONTENTS_LIBDB /* empty */
+
+#define XCHAL_CP0_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_CP0_SA_CONTENTS_LIBDB /* empty */
+
+#define XCHAL_CP1_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_CP1_SA_CONTENTS_LIBDB /* empty */
+
+#define XCHAL_CP2_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_CP2_SA_CONTENTS_LIBDB /* empty */
+
+#define XCHAL_CP3_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_CP3_SA_CONTENTS_LIBDB /* empty */
+
+#define XCHAL_CP4_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_CP4_SA_CONTENTS_LIBDB /* empty */
+
+#define XCHAL_CP5_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_CP5_SA_CONTENTS_LIBDB /* empty */
+
+#define XCHAL_CP6_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_CP6_SA_CONTENTS_LIBDB /* empty */
+
+#define XCHAL_CP7_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_CP7_SA_CONTENTS_LIBDB /* empty */
+
+
+
+
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#if 0 /* is there something equivalent for user TIE? */
+#define XCHAL_CORE_ID "linux_be" /* configuration's alphanumeric core identifier
+ (CoreID) set in the Xtensa Processor Generator */
+
+#define XCHAL_BUILD_UNIQUE_ID 0x00003256 /* software build-unique ID (22-bit) */
+
+/* These definitions describe the hardware targeted by this software: */
+#define XCHAL_HW_CONFIGID0 0xC103D1FF /* config ID reg 0 value (upper 32 of 64 bits) */
+#define XCHAL_HW_CONFIGID1 0x00803256 /* config ID reg 1 value (lower 32 of 64 bits) */
+#define XCHAL_CONFIGID0 XCHAL_HW_CONFIGID0 /* for backward compatibility only -- don't use! */
+#define XCHAL_CONFIGID1 XCHAL_HW_CONFIGID1 /* for backward compatibility only -- don't use! */
+#define XCHAL_HW_RELEASE_MAJOR 1050 /* major release of targeted hardware */
+#define XCHAL_HW_RELEASE_MINOR 1 /* minor release of targeted hardware */
+#define XCHAL_HW_RELEASE_NAME "T1050.1" /* full release name of targeted hardware */
+#define XTHAL_HW_REL_T1050 1
+#define XTHAL_HW_REL_T1050_1 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+#endif /*0*/
+
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#if 0 /* these probably don't belong here, but are related to or implemented using TIE */
+#define XCHAL_HAVE_BOOLEANS 0 /* 1 if booleans option configured, 0 otherwise */
+/* Misc instructions: */
+#define XCHAL_HAVE_MUL32 0 /* 1 if 32-bit integer multiply option configured, 0 otherwise */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* 1 if MUL32 option includes MULUH and MULSH, 0 otherwise */
+
+#define XCHAL_HAVE_FP 0 /* 1 if floating point option configured, 0 otherwise */
+#endif /*0*/
+
+
+#endif /*XTENSA_CONFIG_TIE_H*/
+
diff --git a/include/asm-xtensa/xtensa/coreasm.h b/include/asm-xtensa/xtensa/coreasm.h
new file mode 100644
index 00000000000..a8cfb54c20a
--- /dev/null
+++ b/include/asm-xtensa/xtensa/coreasm.h
@@ -0,0 +1,526 @@
+#ifndef XTENSA_COREASM_H
+#define XTENSA_COREASM_H
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * include/asm-xtensa/xtensa/coreasm.h -- assembler-specific
+ * definitions that depend on CORE configuration.
+ *
+ * Source for configuration-independent binaries (which link in a
+ * configuration-specific HAL library) must NEVER include this file.
+ * It is perfectly normal, however, for the HAL itself to include this
+ * file.
+ *
+ * This file must NOT include xtensa/config/system.h. Any assembler
+ * header file that depends on system information should likely go in
+ * a new systemasm.h (or sysasm.h) header file.
+ *
+ * NOTE: macro beqi32 is NOT configuration-dependent, and is placed
+ * here until we have a configuration-independent header file.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+#include <xtensa/config/core.h>
+#include <xtensa/config/specreg.h>
+
+/*
+ * Assembly-language specific definitions (assembly macros, etc.).
+ */
+
+/*----------------------------------------------------------------------
+ * find_ms_setbit
+ *
+ * This macro finds the most significant bit that is set in <as>
+ * and returns its index + <base> in <ad>, or <base> - 1 if <as> is zero.
+ * The index counts starting at zero for the lsbit, so the return
+ * value ranges from <base>-1 (no bit set) to <base>+31 (msbit set).
+ *
+ * Parameters:
+ * <ad> destination address register (any register)
+ * <as> source address register
+ * <at> temporary address register (must be different than <as>)
+ * <base> constant value added to result (usually 0 or 1)
+ * On entry:
+ * <ad> = undefined if different than <as>
+ * <as> = value whose most significant set bit is to be found
+ * <at> = undefined
+ * no other registers are used by this macro.
+ * On exit:
+ * <ad> = <base> + index of msbit set in original <as>,
+ * = <base> - 1 if original <as> was zero.
+ * <as> clobbered (if not <ad>)
+ * <at> clobbered (if not <ad>)
+ * Example:
+ * find_ms_setbit a0, a4, a0, 0 -- return in a0 index of msbit set in a4
+ */
+
+ .macro find_ms_setbit ad, as, at, base
+#if XCHAL_HAVE_NSA
+ movi \at, 31+\base
+ nsau \as, \as // get index of \as, numbered from msbit (32 if absent)
+ sub \ad, \at, \as // get numbering from lsbit (0..31, -1 if absent)
+#else /* XCHAL_HAVE_NSA */
+ movi \at, \base // start with result of 0 (point to lsbit of 32)
+
+ beqz \as, 2f // special case for zero argument: return -1
+ bltui \as, 0x10000, 1f // is it one of the 16 lsbits? (if so, check lower 16 bits)
+ addi \at, \at, 16 // no, increment result to upper 16 bits (of 32)
+ //srli \as, \as, 16 // check upper half (shift right 16 bits)
+ extui \as, \as, 16, 16 // check upper half (shift right 16 bits)
+1: bltui \as, 0x100, 1f // is it one of the 8 lsbits? (if so, check lower 8 bits)
+ addi \at, \at, 8 // no, increment result to upper 8 bits (of 16)
+ srli \as, \as, 8 // shift right to check upper 8 bits
+1: bltui \as, 0x10, 1f // is it one of the 4 lsbits? (if so, check lower 4 bits)
+ addi \at, \at, 4 // no, increment result to upper 4 bits (of 8)
+ srli \as, \as, 4 // shift right 4 bits to check upper half
+1: bltui \as, 0x4, 1f // is it one of the 2 lsbits? (if so, check lower 2 bits)
+ addi \at, \at, 2 // no, increment result to upper 2 bits (of 4)
+ srli \as, \as, 2 // shift right 2 bits to check upper half
+1: bltui \as, 0x2, 1f // is it the lsbit?
+ addi \at, \at, 2 // no, increment result to upper bit (of 2)
+2: addi \at, \at, -1 // (from just above: add 1; from beqz: return -1)
+ //srli \as, \as, 1
+1: // done! \at contains index of msbit set (or -1 if none set)
+ .if 0x\ad - 0x\at // destination different than \at ? (works because regs are a0-a15)
+ mov \ad, \at // then move result to \ad
+ .endif
+#endif /* XCHAL_HAVE_NSA */
+ .endm // find_ms_setbit
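+
+/*
+ * Reference model (illustrative only, not used by the macro above):
+ * in C, the computation implemented by find_ms_setbit is roughly:
+ *
+ *	int find_ms_setbit(unsigned as, int base)
+ *	{
+ *		int i;
+ *		for (i = 31; i >= 0; i--)
+ *			if (as & (1u << i))
+ *				return base + i;
+ *		return base - 1;	// no bit set
+ *	}
+ */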
+
+/*----------------------------------------------------------------------
+ * find_ls_setbit
+ *
+ * This macro finds the least significant bit that is set in <as>,
+ * and returns its index in <ad>.
+ * Usage is the same as for the find_ms_setbit macro.
+ * Example:
+ * find_ls_setbit a0, a4, a0, 0 -- return in a0 index of lsbit set in a4
+ */
+
+ .macro find_ls_setbit ad, as, at, base
+ neg \at, \as // keep only the least-significant bit that is set...
+ and \as, \at, \as // ... in \as
+ find_ms_setbit \ad, \as, \at, \base
+ .endm // find_ls_setbit
+
+/*----------------------------------------------------------------------
+ * find_ls_one
+ *
+ * Same as find_ls_setbit with base zero.
+ * Source (as) and destination (ad) registers must be different.
+ * Provided for backward compatibility.
+ */
+
+ .macro find_ls_one ad, as
+ find_ls_setbit \ad, \as, \ad, 0
+ .endm // find_ls_one
+
+/*----------------------------------------------------------------------
+ * floop, floopnez, floopgtz, floopend
+ *
+ * These macros are used for fast inner loops that
+ * work whether or not the Loops option is configured.
+ * If the Loops option is configured, they simply use
+ * the zero-overhead LOOP instructions; otherwise
+ * they use explicit decrement and branch instructions.
+ *
+ * They are used in pairs, with floop, floopnez or floopgtz
+ * at the beginning of the loop, and floopend at the end.
+ *
+ * Each pair of loop macro calls must be given the loop count
+ * address register and a unique label for that loop.
+ *
+ * Example:
+ *
+ * movi a3, 16 // loop 16 times
+ * floop a3, myloop1
+ * :
+ * bnez a7, end1 // exit loop if a7 != 0
+ * :
+ * floopend a3, myloop1
+ * end1:
+ *
+ * Like the LOOP instructions, these macros cannot be
+ * nested, must include at least one instruction,
+ * cannot call functions inside the loop, etc.
+ * The loop can be exited by jumping to the instruction
+ * following floopend (or elsewhere outside the loop),
+ * or continued by jumping to a NOP instruction placed
+ * immediately before floopend.
+ *
+ * Unlike LOOP instructions, the register passed to floop*
+ * cannot be used inside the loop, because it is used as
+ * the loop counter if the Loops option is not configured.
+ * Its value is also undefined after exiting the loop.
+ * Because the loop counter register is active inside
+ * the loop, you can't easily use this construct to loop
+ * across a register file using ROTW as you might with LOOP
+ * instructions, unless you copy the loop register along.
+ */
+
+ /* Named label version of the macros: */
+
+ .macro floop ar, endlabel
+ floop_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
+ .endm
+
+ .macro floopnez ar, endlabel
+ floopnez_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
+ .endm
+
+ .macro floopgtz ar, endlabel
+ floopgtz_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
+ .endm
+
+ .macro floopend ar, endlabel
+ floopend_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
+ .endm
+
+ /* Numbered local label version of the macros: */
+#if 0 /*UNTESTED*/
+ .macro floop89 ar
+ floop_ \ar, 8, 9f
+ .endm
+
+ .macro floopnez89 ar
+ floopnez_ \ar, 8, 9f
+ .endm
+
+ .macro floopgtz89 ar
+ floopgtz_ \ar, 8, 9f
+ .endm
+
+ .macro floopend89 ar
+ floopend_ \ar, 8b, 9
+ .endm
+#endif /*0*/
+
+ /* Underlying version of the macros: */
+
+ .macro floop_ ar, startlabel, endlabelref
+ .ifdef _infloop_
+ .if _infloop_
+ .err // Error: floop cannot be nested
+ .endif
+ .endif
+ .set _infloop_, 1
+#if XCHAL_HAVE_LOOPS
+ loop \ar, \endlabelref
+#else /* XCHAL_HAVE_LOOPS */
+\startlabel:
+ addi \ar, \ar, -1
+#endif /* XCHAL_HAVE_LOOPS */
+ .endm // floop_
+
+ .macro floopnez_ ar, startlabel, endlabelref
+ .ifdef _infloop_
+ .if _infloop_
+ .err // Error: floopnez cannot be nested
+ .endif
+ .endif
+ .set _infloop_, 1
+#if XCHAL_HAVE_LOOPS
+ loopnez \ar, \endlabelref
+#else /* XCHAL_HAVE_LOOPS */
+ beqz \ar, \endlabelref
+\startlabel:
+ addi \ar, \ar, -1
+#endif /* XCHAL_HAVE_LOOPS */
+ .endm // floopnez_
+
+ .macro floopgtz_ ar, startlabel, endlabelref
+ .ifdef _infloop_
+ .if _infloop_
+ .err // Error: floopgtz cannot be nested
+ .endif
+ .endif
+ .set _infloop_, 1
+#if XCHAL_HAVE_LOOPS
+ loopgtz \ar, \endlabelref
+#else /* XCHAL_HAVE_LOOPS */
+ bltz \ar, \endlabelref
+ beqz \ar, \endlabelref
+\startlabel:
+ addi \ar, \ar, -1
+#endif /* XCHAL_HAVE_LOOPS */
+ .endm // floopgtz_
+
+
+ .macro floopend_ ar, startlabelref, endlabel
+ .ifndef _infloop_
+ .err // Error: floopend without matching floopXXX
+ .endif
+ .ifeq _infloop_
+ .err // Error: floopend without matching floopXXX
+ .endif
+ .set _infloop_, 0
+#if ! XCHAL_HAVE_LOOPS
+ bnez \ar, \startlabelref
+#endif /* XCHAL_HAVE_LOOPS */
+\endlabel:
+ .endm // floopend_
+
+/*----------------------------------------------------------------------
+ * crsil -- conditional RSIL (read/set interrupt level)
+ *
+ * Executes the RSIL instruction if it exists, else just reads PS.
+ * The RSIL instruction does not exist in the new exception architecture
+ * if the interrupt option is not selected.
+ */
+
+ .macro crsil ar, newlevel
+#if XCHAL_HAVE_OLD_EXC_ARCH || XCHAL_HAVE_INTERRUPTS
+ rsil \ar, \newlevel
+#else
+ rsr \ar, PS
+#endif
+ .endm // crsil
+
+/*----------------------------------------------------------------------
+ * window_spill{4,8,12}
+ *
+ * These macros spill callers' register windows to the stack.
+ * They work for both privileged and non-privileged tasks.
+ * Must be called from a windowed ABI context, eg. within
+ * a windowed ABI function (ie. valid stack frame, window
+ * exceptions enabled, not in exception mode, etc).
+ *
+ * This macro requires a single invocation of the window_spill_common
+ * macro in the same assembly unit and section.
+ *
+ * Note that using window_spill{4,8,12} macros is more efficient
+ * than calling a function implemented using window_spill_function,
+ * because the latter needs extra code to figure out the size of
+ * the call to the spilling function.
+ *
+ * Example usage:
+ *
+ * .text
+ * .align 4
+ * .global some_function
+ * .type some_function,@function
+ * some_function:
+ * entry a1, 16
+ * :
+ * :
+ *
+ * window_spill4 // spill windows of some_function's callers; preserves a0..a3 only;
+ * // to use window_spill{8,12} in this example function we'd have
+ * // to increase space allocated by the entry instruction, because
+ * // 16 bytes only allows call4; 32 or 48 bytes (+locals) are needed
+ * // for call8/window_spill8 or call12/window_spill12 respectively.
+ * :
+ *
+ * retw
+ *
+ * window_spill_common // instantiates code used by window_spill4
+ *
+ *
+ * On entry:
+ * none (if window_spill4)
+ * stack frame has enough space allocated for call8 (if window_spill8)
+ * stack frame has enough space allocated for call12 (if window_spill12)
+ * On exit:
+ * a4..a15 clobbered (if window_spill4)
+ * a8..a15 clobbered (if window_spill8)
+ * a12..a15 clobbered (if window_spill12)
+ * no caller windows are in live registers
+ */
+
+ .macro window_spill4
+#if XCHAL_HAVE_WINDOWED
+# if XCHAL_NUM_AREGS == 16
+ movi a15, 0 // for 16-register files, no need to call to reach the end
+# elif XCHAL_NUM_AREGS == 32
+ call4 .L__wdwspill_assist28 // call deep enough to clear out any live callers
+# elif XCHAL_NUM_AREGS == 64
+ call4 .L__wdwspill_assist60 // call deep enough to clear out any live callers
+# endif
+#endif
+ .endm // window_spill4
+
+ .macro window_spill8
+#if XCHAL_HAVE_WINDOWED
+# if XCHAL_NUM_AREGS == 16
+ movi a15, 0 // for 16-register files, no need to call to reach the end
+# elif XCHAL_NUM_AREGS == 32
+ call8 .L__wdwspill_assist24 // call deep enough to clear out any live callers
+# elif XCHAL_NUM_AREGS == 64
+ call8 .L__wdwspill_assist56 // call deep enough to clear out any live callers
+# endif
+#endif
+ .endm // window_spill8
+
+ .macro window_spill12
+#if XCHAL_HAVE_WINDOWED
+# if XCHAL_NUM_AREGS == 16
+ movi a15, 0 // for 16-register files, no need to call to reach the end
+# elif XCHAL_NUM_AREGS == 32
+ call12 .L__wdwspill_assist20 // call deep enough to clear out any live callers
+# elif XCHAL_NUM_AREGS == 64
+ call12 .L__wdwspill_assist52 // call deep enough to clear out any live callers
+# endif
+#endif
+ .endm // window_spill12
+
+/*----------------------------------------------------------------------
+ * window_spill_function
+ *
+ * This macro outputs a function that will spill its caller's callers'
+ * register windows to the stack. Eg. it could be used to implement
+ * a version of xthal_window_spill() that works in non-privileged tasks.
+ * This works for both privileged and non-privileged tasks.
+ *
+ * Typical usage:
+ *
+ * .text
+ * .align 4
+ * .global my_spill_function
+ * .type my_spill_function,@function
+ * my_spill_function:
+ * window_spill_function
+ *
+ * On entry to resulting function:
+ * none
+ * On exit from resulting function:
+ * none (no caller windows are in live registers)
+ */
+
+ .macro window_spill_function
+#if XCHAL_HAVE_WINDOWED
+# if XCHAL_NUM_AREGS == 32
+ entry sp, 48
+ bbci.l a0, 31, 1f // branch if called with call4
+ bbsi.l a0, 30, 2f // branch if called with call12
+ call8 .L__wdwspill_assist16 // called with call8, only need another 8
+ retw
+1: call12 .L__wdwspill_assist16 // called with call4, only need another 12
+ retw
+2: call4 .L__wdwspill_assist16 // called with call12, only need another 4
+ retw
+# elif XCHAL_NUM_AREGS == 64
+ entry sp, 48
+ bbci.l a0, 31, 1f // branch if called with call4
+ bbsi.l a0, 30, 2f // branch if called with call12
+ call4 .L__wdwspill_assist52 // called with call8, only need a call4
+ retw
+1: call8 .L__wdwspill_assist52 // called with call4, only need a call8
+ retw
+2: call12 .L__wdwspill_assist40 // called with call12, can skip a call12
+ retw
+# elif XCHAL_NUM_AREGS == 16
+ entry sp, 16
+ bbci.l a0, 31, 1f // branch if called with call4
+ bbsi.l a0, 30, 2f // branch if called with call12
+ movi a7, 0 // called with call8
+ retw
+1: movi a11, 0 // called with call4
+2: retw // if called with call12, everything already spilled
+
+// movi a15, 0 // trick to spill all but the direct caller
+// j 1f
+// // The entry instruction is magical in the assembler (gets auto-aligned)
+// // so we have to jump to it to avoid falling through the padding.
+// // We need entry/retw to know where to return.
+//1: entry sp, 16
+// retw
+# else
+# error "unrecognized address register file size"
+# endif
+#endif /* XCHAL_HAVE_WINDOWED */
+ window_spill_common
+ .endm // window_spill_function
+
+/*----------------------------------------------------------------------
+ * window_spill_common
+ *
+ * Common code used by any number of invocations of the window_spill##
+ * and window_spill_function macros.
+ *
+ * Must be instantiated exactly once within a given assembly unit,
+ * within call/j range of and same section as window_spill##
+ * macro invocations for that assembly unit.
+ * (Is automatically instantiated by the window_spill_function macro.)
+ */
+
+ .macro window_spill_common
+#if XCHAL_HAVE_WINDOWED && (XCHAL_NUM_AREGS == 32 || XCHAL_NUM_AREGS == 64)
+ .ifndef .L__wdwspill_defined
+# if XCHAL_NUM_AREGS >= 64
+.L__wdwspill_assist60:
+ entry sp, 32
+ call8 .L__wdwspill_assist52
+ retw
+.L__wdwspill_assist56:
+ entry sp, 16
+ call4 .L__wdwspill_assist52
+ retw
+.L__wdwspill_assist52:
+ entry sp, 48
+ call12 .L__wdwspill_assist40
+ retw
+.L__wdwspill_assist40:
+ entry sp, 48
+ call12 .L__wdwspill_assist28
+ retw
+# endif
+.L__wdwspill_assist28:
+ entry sp, 48
+ call12 .L__wdwspill_assist16
+ retw
+.L__wdwspill_assist24:
+ entry sp, 32
+ call8 .L__wdwspill_assist16
+ retw
+.L__wdwspill_assist20:
+ entry sp, 16
+ call4 .L__wdwspill_assist16
+ retw
+.L__wdwspill_assist16:
+ entry sp, 16
+ movi a15, 0
+ retw
+ .set .L__wdwspill_defined, 1
+ .endif
+#endif /* XCHAL_HAVE_WINDOWED with 32 or 64 aregs */
+ .endm // window_spill_common
+
+/*----------------------------------------------------------------------
+ * beqi32
+ *
+ * This macro implements a version of beqi for an arbitrary 32-bit immediate value.
+ *
+ * beqi32 ax, ay, imm32, label
+ *
+ * Compares the value in register ax with the 32-bit immediate value and
+ * jumps to label if they are equal.  Clobbers register ay if needed.
+ *
+ */
+ .macro beqi32 ax, ay, imm, label
+ .ifeq ((\imm-1) & ~7) // 1..8 ?
+ beqi \ax, \imm, \label
+ .else
+ .ifeq (\imm+1) // -1 ?
+ beqi \ax, \imm, \label
+ .else
+ .ifeq (\imm) // 0 ?
+ beqz \ax, \label
+ .else
+ // We could also handle immediates 10,12,16,32,64,128,256
+ // but it would be a long macro...
+ movi \ay, \imm
+ beq \ax, \ay, \label
+ .endif
+ .endif
+ .endif
+ .endm // beqi32
+
+#endif /*XTENSA_COREASM_H*/
+
diff --git a/include/asm-xtensa/xtensa/corebits.h b/include/asm-xtensa/xtensa/corebits.h
new file mode 100644
index 00000000000..e578ade4163
--- /dev/null
+++ b/include/asm-xtensa/xtensa/corebits.h
@@ -0,0 +1,77 @@
+#ifndef XTENSA_COREBITS_H
+#define XTENSA_COREBITS_H
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * xtensa/corebits.h - Xtensa Special Register field positions and masks.
+ *
+ * (In previous releases, these were defined in specreg.h, a generated file.
+ * This file is not generated, i.e. it is processor configuration independent.)
+ */
+
+
+/* EXCCAUSE register fields: */
+#define EXCCAUSE_EXCCAUSE_SHIFT 0
+#define EXCCAUSE_EXCCAUSE_MASK 0x3F
+/* Exception causes (mostly incomplete!): */
+#define EXCCAUSE_ILLEGAL 0
+#define EXCCAUSE_SYSCALL 1
+#define EXCCAUSE_IFETCHERROR 2
+#define EXCCAUSE_LOADSTOREERROR 3
+#define EXCCAUSE_LEVEL1INTERRUPT 4
+#define EXCCAUSE_ALLOCA 5
+
+/* PS register fields: */
+#define PS_WOE_SHIFT 18
+#define PS_WOE_MASK 0x00040000
+#define PS_WOE PS_WOE_MASK
+#define PS_CALLINC_SHIFT 16
+#define PS_CALLINC_MASK 0x00030000
+#define PS_CALLINC(n) (((n)&3)<<PS_CALLINC_SHIFT) /* n = 0..3 */
+#define PS_OWB_SHIFT 8
+#define PS_OWB_MASK 0x00000F00
+#define PS_OWB(n) (((n)&15)<<PS_OWB_SHIFT) /* n = 0..15 (or 0..7) */
+#define PS_RING_SHIFT 6
+#define PS_RING_MASK 0x000000C0
+#define PS_RING(n) (((n)&3)<<PS_RING_SHIFT) /* n = 0..3 */
+#define PS_UM_SHIFT 5
+#define PS_UM_MASK 0x00000020
+#define PS_UM PS_UM_MASK
+#define PS_EXCM_SHIFT 4
+#define PS_EXCM_MASK 0x00000010
+#define PS_EXCM PS_EXCM_MASK
+#define PS_INTLEVEL_SHIFT 0
+#define PS_INTLEVEL_MASK 0x0000000F
+#define PS_INTLEVEL(n) ((n)&PS_INTLEVEL_MASK) /* n = 0..15 */
+/* Backward compatibility (deprecated): */
+#define PS_PROGSTACK_SHIFT PS_UM_SHIFT
+#define PS_PROGSTACK_MASK PS_UM_MASK
+#define PS_PROG_SHIFT PS_UM_SHIFT
+#define PS_PROG_MASK PS_UM_MASK
+#define PS_PROG PS_UM
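+
+/*
+ * Illustrative usage (an assumption, not part of this header): the field
+ * macros above compose into a complete PS image, eg. a user-mode value
+ * with window overflow detection enabled and all interrupts unmasked:
+ *
+ *	unsigned ps = PS_WOE | PS_CALLINC(1) | PS_UM | PS_RING(0) | PS_INTLEVEL(0);
+ */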
+
+/* DBREAKCn register fields: */
+#define DBREAKC_MASK_SHIFT 0
+#define DBREAKC_MASK_MASK 0x0000003F
+#define DBREAKC_LOADBREAK_SHIFT 30
+#define DBREAKC_LOADBREAK_MASK 0x40000000
+#define DBREAKC_STOREBREAK_SHIFT 31
+#define DBREAKC_STOREBREAK_MASK 0x80000000
+
+/* DEBUGCAUSE register fields: */
+#define DEBUGCAUSE_DEBUGINT_SHIFT 5
+#define DEBUGCAUSE_DEBUGINT_MASK 0x20 /* debug interrupt */
+#define DEBUGCAUSE_BREAKN_SHIFT 4
+#define DEBUGCAUSE_BREAKN_MASK 0x10 /* BREAK.N instruction */
+#define DEBUGCAUSE_BREAK_SHIFT 3
+#define DEBUGCAUSE_BREAK_MASK 0x08 /* BREAK instruction */
+#define DEBUGCAUSE_DBREAK_SHIFT 2
+#define DEBUGCAUSE_DBREAK_MASK 0x04 /* DBREAK match */
+#define DEBUGCAUSE_IBREAK_SHIFT 1
+#define DEBUGCAUSE_IBREAK_MASK 0x02 /* IBREAK match */
+#define DEBUGCAUSE_ICOUNT_SHIFT 0
+#define DEBUGCAUSE_ICOUNT_MASK 0x01 /* ICOUNT would increment to zero */
+
+#endif /*XTENSA_COREBITS_H*/
+
diff --git a/include/asm-xtensa/xtensa/hal.h b/include/asm-xtensa/xtensa/hal.h
new file mode 100644
index 00000000000..d1047250545
--- /dev/null
+++ b/include/asm-xtensa/xtensa/hal.h
@@ -0,0 +1,822 @@
+#ifndef XTENSA_HAL_H
+#define XTENSA_HAL_H
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * include/asm-xtensa/xtensa/hal.h -- contains a definition of the
+ * Core HAL interface.
+ *
+ * All definitions in this header file are independent of any specific
+ * Xtensa processor configuration. Thus an OS or other software can
+ * include this header file and be compiled into configuration-
+ * independent objects that can be distributed and eventually linked
+ * to the HAL library (libhal.a) to create a configuration-specific
+ * final executable.
+ *
+ * Certain definitions, however, are release-specific -- such as the
+ * XTHAL_RELEASE_xxx macros (or additions made in later releases).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+/*----------------------------------------------------------------------
+ Constant Definitions
+ (shared with assembly)
+ ----------------------------------------------------------------------*/
+
+/* Software release information (not configuration-specific!): */
+#define XTHAL_RELEASE_MAJOR 1050
+#define XTHAL_RELEASE_MINOR 0
+#define XTHAL_RELEASE_NAME "T1050.0-2002-08-06-eng0"
+#define XTHAL_RELEASE_INTERNAL "2002-08-06-eng0"
+#define XTHAL_REL_T1050 1
+#define XTHAL_REL_T1050_0 1
+#define XTHAL_REL_T1050_0_2002 1
+#define XTHAL_REL_T1050_0_2002_08 1
+#define XTHAL_REL_T1050_0_2002_08_06 1
+#define XTHAL_REL_T1050_0_2002_08_06_ENG0 1
+
+/* HAL version numbers (these names are for backward compatibility): */
+#define XTHAL_MAJOR_REV XTHAL_RELEASE_MAJOR
+#define XTHAL_MINOR_REV XTHAL_RELEASE_MINOR
+/*
+ * A bit of software release history on values of XTHAL_{MAJOR,MINOR}_REV:
+ *
+ * Release MAJOR MINOR Comment
+ * ======= ===== ===== =======
+ * T1015.n n/a n/a (HAL not yet available)
+ * T1020.{0,1,2} 0 1 (HAL beta)
+ * T1020.{3,4} 0 2 First release.
+ * T1020.n (n>4) 0 2 or >3 (TBD)
+ * T1030.0 0 1 (HAL beta)
+ * T1030.{1,2} 0 3 Equivalent to first release.
+ * T1030.n (n>=3) 0 >= 3 (TBD)
+ * T1040.n 1040 n Full CHAL available from T1040.2
+ * T1050.n 1050 n Current release.
+ *
+ *
+ * Note: there is a distinction between the software release with
+ * which something is compiled (accessible using XTHAL_RELEASE_* macros)
+ * and the software release with which the HAL library was compiled
+ * (accessible using Xthal_release_* global variables). This
+ * distinction is particularly relevant for vendors that distribute
+ * configuration-independent binaries (eg. an OS), where their customer
+ * might link it with a HAL of a different Xtensa software release.
+ * In this case, it may be appropriate for the OS to verify at run-time
+ * whether XTHAL_RELEASE_* and Xthal_release_* are compatible.
+ * [Guidelines as to which release is compatible with which are not
+ * currently provided explicitly, but might be inferred from reading
+ * OSKit documentation for all releases -- compatibility is also highly
+ * dependent on which HAL features are used. Each release is usually
+ * backward compatible, with very few exceptions if any.]
+ *
+ * Notes:
+ * Tornado 2.0 supported in T1020.3+, T1030.1+, and T1040.{0,1} only.
+ * Tornado 2.0.2 supported in T1040.2+, and T1050.
+ * Compile-time HAL port of NucleusPlus supported by T1040.2+ and T1050.
+ */
+
+
+/*
+ * Architectural limits, independent of configuration.
+ * Note that these are ISA-defined limits, not micro-architecture implementation
+ * limits enforced by the Xtensa Processor Generator (which may be stricter than
+ * these below).
+ */
+#define XTHAL_MAX_CPS 8 /* max number of coprocessors (0..7) */
+#define XTHAL_MAX_INTERRUPTS 32 /* max number of interrupts (0..31) */
+#define XTHAL_MAX_INTLEVELS 16 /* max number of interrupt levels (0..15) */
+ /* (as of T1040, implementation limit is 7: 0..6) */
+#define XTHAL_MAX_TIMERS 4 /* max number of timers (CCOMPARE0..CCOMPARE3) */
+ /* (as of T1040, implementation limit is 3: 0..2) */
+
+/* Misc: */
+#define XTHAL_LITTLEENDIAN 0
+#define XTHAL_BIGENDIAN 1
+
+
+/* Interrupt types: */
+#define XTHAL_INTTYPE_UNCONFIGURED 0
+#define XTHAL_INTTYPE_SOFTWARE 1
+#define XTHAL_INTTYPE_EXTERN_EDGE 2
+#define XTHAL_INTTYPE_EXTERN_LEVEL 3
+#define XTHAL_INTTYPE_TIMER 4
+#define XTHAL_INTTYPE_NMI 5
+#define XTHAL_MAX_INTTYPES 6 /* number of interrupt types */
+
+/* Timer related: */
+#define XTHAL_TIMER_UNCONFIGURED -1 /* Xthal_timer_interrupt[] value for non-existent timers */
+#define XTHAL_TIMER_UNASSIGNED XTHAL_TIMER_UNCONFIGURED /* (for backwards compatibility only) */
+
+
+/* Access Mode bits (tentative): */ /* bit abbr unit short_name PPC equ - Description */
+#define XTHAL_AMB_EXCEPTION 0 /* 001 E EX fls: EXception none - generate exception on any access (aka "illegal") */
+#define XTHAL_AMB_HITCACHE 1 /* 002 C CH fls: use Cache on Hit ~(I CI) - use cache on hit -- way from tag match [or H HC, or U UC] (ISA: same, except for Isolate case) */
+#define XTHAL_AMB_ALLOCATE 2 /* 004 A AL fl?: ALlocate none - refill cache on miss -- way from LRU [or F FI fill] (ISA: Read/Write Miss Refill) */
+#define XTHAL_AMB_WRITETHRU 3 /* 008 W WT --s: WriteThrough W WT - store immediately to memory (ISA: same) */
+#define XTHAL_AMB_ISOLATE 4 /* 010 I IS fls: ISolate none - use cache regardless of hit-vs-miss -- way from vaddr (ISA: use-cache-on-miss+hit) */
+#define XTHAL_AMB_GUARD 5 /* 020 G GU ?l?: GUard G * - non-speculative; spec/replay refs not permitted */
+#if 0
+#define XTHAL_AMB_ORDERED x /* 000 O OR fls: ORdered G * - mem accesses cannot be out of order */
+#define XTHAL_AMB_FUSEWRITES x /* 000 F FW --s: FuseWrites none - allow combining/merging multiple writes (to same datapath data unit) into one (implied by writeback) */
+#define XTHAL_AMB_COHERENT x /* 000 M MC fl?: Mem/MP Coherent M - on reads, other CPUs/bus-masters may need to supply data */
+#define XTHAL_AMB_TRUSTED x /* 000 T TR ?l?: TRusted none - memory will not bus error (if it does, handle as fatal imprecise interrupt) */
+#define XTHAL_AMB_PREFETCH x /* 000 P PR fl?: PRefetch none - on refill, read line+1 into prefetch buffers */
+#define XTHAL_AMB_STREAM x /* 000 S ST ???: STreaming none - access one of N stream buffers */
+#endif /*0*/
+
+#define XTHAL_AM_EXCEPTION (1<<XTHAL_AMB_EXCEPTION)
+#define XTHAL_AM_HITCACHE (1<<XTHAL_AMB_HITCACHE)
+#define XTHAL_AM_ALLOCATE (1<<XTHAL_AMB_ALLOCATE)
+#define XTHAL_AM_WRITETHRU (1<<XTHAL_AMB_WRITETHRU)
+#define XTHAL_AM_ISOLATE (1<<XTHAL_AMB_ISOLATE)
+#define XTHAL_AM_GUARD (1<<XTHAL_AMB_GUARD)
+#if 0
+#define XTHAL_AM_ORDERED (1<<XTHAL_AMB_ORDERED)
+#define XTHAL_AM_FUSEWRITES (1<<XTHAL_AMB_FUSEWRITES)
+#define XTHAL_AM_COHERENT (1<<XTHAL_AMB_COHERENT)
+#define XTHAL_AM_TRUSTED (1<<XTHAL_AMB_TRUSTED)
+#define XTHAL_AM_PREFETCH (1<<XTHAL_AMB_PREFETCH)
+#define XTHAL_AM_STREAM (1<<XTHAL_AMB_STREAM)
+#endif /*0*/
+
+/*
+ * Allowed Access Modes (bit combinations).
+ *
+ * Columns are:
+ * "FOGIWACE"
+ * Access mode bits (see XTHAL_AMB_xxx above).
+ * <letter> = bit is set
+ * '-' = bit is clear
+ * '.' = bit is irrelevant / don't care, as follows:
+ * E=1 makes all others irrelevant
+ * W,F relevant only for stores
+ * "2345"
+ * Indicates which Xtensa releases support the corresponding
+ * access mode. Releases for each character column are:
+ * 2 = prior to T1020.2: T1015 (V1.5), T1020.0, T1020.1
+ * 3 = T1020.2 and later: T1020.2+, T1030
+ * 4 = T1040
+ * 5 = T1050 (maybe)
+ * And the character column contents are:
+ * <number> = support by release(s)
+ * "." = unsupported by release(s)
+ * "?" = support unknown
+ */
+ /* FOGIWACE 2345 */
+/* For instruction fetch: */
+#define XTHAL_FAM_EXCEPTION 0x001 /* .......E 2345 exception */
+#define XTHAL_FAM_ISOLATE 0x012 /* .--I.-C- .... isolate */
+#define XTHAL_FAM_BYPASS 0x000 /* .---.--- 2345 bypass */
+#define XTHAL_FAM_NACACHED 0x002 /* .---.-C- .... cached no-allocate (frozen) */
+#define XTHAL_FAM_CACHED 0x006 /* .---.AC- 2345 cached */
+/* For data load: */
+#define XTHAL_LAM_EXCEPTION 0x001 /* .......E 2345 exception */
+#define XTHAL_LAM_ISOLATE 0x012 /* .--I.-C- 2345 isolate */
+#define XTHAL_LAM_BYPASS 0x000 /* .O--.--- 2... bypass speculative */
+#define XTHAL_LAM_BYPASSG 0x020 /* .OG-.--- .345 bypass guarded */
+#define XTHAL_LAM_NACACHED 0x002 /* .O--.-C- 2... cached no-allocate speculative */
+#define XTHAL_LAM_NACACHEDG 0x022 /* .OG-.-C- .345 cached no-allocate guarded */
+#define XTHAL_LAM_CACHED 0x006 /* .---.AC- 2345 cached speculative */
+#define XTHAL_LAM_CACHEDG 0x026 /* .?G-.AC- .... cached guarded */
+/* For data store: */
+#define XTHAL_SAM_EXCEPTION 0x001 /* .......E 2345 exception */
+#define XTHAL_SAM_ISOLATE 0x032 /* .-GI--C- 2345 isolate */
+#define XTHAL_SAM_BYPASS 0x028 /* -OG-W--- 2345 bypass */
+/*efine XTHAL_SAM_BYPASSF 0x028*/ /* F-G-W--- ...? bypass write-combined */
+#define XTHAL_SAM_WRITETHRU 0x02A /* -OG-W-C- 234? writethrough */
+/*efine XTHAL_SAM_WRITETHRUF 0x02A*/ /* F-G-W-C- ...5 writethrough write-combined */
+#define XTHAL_SAM_WRITEALLOC 0x02E /* -OG-WAC- ...? writethrough-allocate */
+/*efine XTHAL_SAM_WRITEALLOCF 0x02E*/ /* F-G-WAC- ...? writethrough-allocate write-combined */
+#define XTHAL_SAM_WRITEBACK 0x026 /* F-G--AC- ...5 writeback */
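+
+/*
+ * For illustration (derived from the definitions above): each allowed
+ * access mode is just an OR of XTHAL_AM_* bits, eg.:
+ *
+ *	XTHAL_FAM_CACHED == XTHAL_AM_HITCACHE | XTHAL_AM_ALLOCATE	(== 0x006)
+ *	XTHAL_SAM_BYPASS == XTHAL_AM_GUARD | XTHAL_AM_WRITETHRU	(== 0x028)
+ */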
+
+#if 0
+/*
+ Cache attribute encoding for CACHEATTR (per ISA):
+ (Note: if this differs from ISA Ref Manual, ISA has precedence)
+
+ Inst-fetches Loads Stores
+ ------------- ------------ -------------
+0x0 FCA_EXCEPTION ?LCA_NACACHED_G* SCA_WRITETHRU "uncached"
+0x1 FCA_CACHED LCA_CACHED SCA_WRITETHRU cached
+0x2 FCA_BYPASS LCA_BYPASS_G* SCA_BYPASS bypass
+0x3 FCA_CACHED LCA_CACHED SCA_WRITEALLOCF write-allocate
+ or LCA_EXCEPTION SCA_EXCEPTION (if unimplemented)
+0x4 FCA_CACHED LCA_CACHED SCA_WRITEBACK write-back
+ or LCA_EXCEPTION SCA_EXCEPTION (if unimplemented)
+0x5..D FCA_EXCEPTION LCA_EXCEPTION SCA_EXCEPTION (reserved)
+0xE FCA_EXCEPTION LCA_ISOLATE SCA_ISOLATE isolate
+0xF FCA_EXCEPTION LCA_EXCEPTION SCA_EXCEPTION illegal
+ * Prior to T1020.2?, guard feature not supported, this defaulted to speculative (no _G)
+*/
+#endif /*0*/
+
+
+#if !defined(__ASSEMBLY__) && !defined(_NOCLANGUAGE)
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*----------------------------------------------------------------------
+ HAL
+ ----------------------------------------------------------------------*/
+
+/* Constant to be checked in build = (XTHAL_MAJOR_REV<<16)|XTHAL_MINOR_REV */
+extern const unsigned int Xthal_rev_no;
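+
+/*
+ * Illustrative check (a sketch, not part of the HAL interface proper):
+ * configuration-independent code can verify at run-time that the HAL
+ * library it was linked against matches the headers it was built with:
+ *
+ *	if (Xthal_rev_no != ((XTHAL_MAJOR_REV << 16) | XTHAL_MINOR_REV))
+ *		;	// header/library release mismatch -- handle as appropriate
+ */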
+
+
+/*----------------------------------------------------------------------
+ Processor State
+ ----------------------------------------------------------------------*/
+/* save & restore the extra processor state */
+extern void xthal_save_extra(void *base);
+extern void xthal_restore_extra(void *base);
+
+extern void xthal_save_cpregs(void *base, int);
+extern void xthal_restore_cpregs(void *base, int);
+
+/*extern void xthal_save_all_extra(void *base);*/
+/*extern void xthal_restore_all_extra(void *base);*/
+
+/* space for processor state */
+extern const unsigned int Xthal_extra_size;
+extern const unsigned int Xthal_extra_align;
+/* space for TIE register files */
+extern const unsigned int Xthal_cpregs_size[XTHAL_MAX_CPS];
+extern const unsigned int Xthal_cpregs_align[XTHAL_MAX_CPS];
+
+/* total of space for the processor state (for Tor2) */
+extern const unsigned int Xthal_all_extra_size;
+extern const unsigned int Xthal_all_extra_align;
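+
+/*
+ * Illustrative usage (an assumption; the save-area names are hypothetical
+ * per-task buffers): a context switch might reserve Xthal_extra_size
+ * bytes, aligned to Xthal_extra_align, for each task and do:
+ *
+ *	xthal_save_extra(old_task_save_area);
+ *	xthal_restore_extra(new_task_save_area);
+ */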
+
+/* initialize the extra processor */
+/*extern void xthal_init_extra(void);*/
+/* initialize the TIE coprocessor */
+/*extern void xthal_init_cp(int);*/
+
+/* initialize the extra processor */
+extern void xthal_init_mem_extra(void *);
+/* initialize the TIE coprocessor */
+extern void xthal_init_mem_cp(void *, int);
+
+/* validate & invalidate the TIE register file */
+extern void xthal_validate_cp(int);
+extern void xthal_invalidate_cp(int);
+
+/* the number of TIE coprocessors contiguous from zero (for Tor2) */
+extern const unsigned int Xthal_num_coprocessors;
+
+/* actual number of coprocessors */
+extern const unsigned char Xthal_cp_num;
+/* index of highest numbered coprocessor, plus one */
+extern const unsigned char Xthal_cp_max;
+/* index of highest allowed coprocessor number, per cfg, plus one */
+/*extern const unsigned char Xthal_cp_maxcfg;*/
+/* bitmask of which coprocessors are present */
+extern const unsigned int Xthal_cp_mask;
+
+/* read and write cpenable register */
+extern void xthal_set_cpenable(unsigned);
+extern unsigned xthal_get_cpenable(void);
+
+/* read & write extra state register */
+/*extern int xthal_read_extra(void *base, unsigned reg, unsigned *value);*/
+/*extern int xthal_write_extra(void *base, unsigned reg, unsigned value);*/
+
+/* read & write a TIE coprocessor register */
+/*extern int xthal_read_cpreg(void *base, int cp, unsigned reg, unsigned *value);*/
+/*extern int xthal_write_cpreg(void *base, int cp, unsigned reg, unsigned value);*/
+
+/* return coprocessor number based on register */
+/*extern int xthal_which_cp(unsigned reg);*/
+
+/*----------------------------------------------------------------------
+ Interrupts
+ ----------------------------------------------------------------------*/
+
+/* the number of interrupt levels */
+extern const unsigned char Xthal_num_intlevels;
+/* the number of interrupts */
+extern const unsigned char Xthal_num_interrupts;
+
+/* mask for level of interrupts */
+extern const unsigned int Xthal_intlevel_mask[XTHAL_MAX_INTLEVELS];
+/* mask for level 0 to N interrupts */
+extern const unsigned int Xthal_intlevel_andbelow_mask[XTHAL_MAX_INTLEVELS];
+
+/* level of each interrupt */
+extern const unsigned char Xthal_intlevel[XTHAL_MAX_INTERRUPTS];
+
+/* type per interrupt */
+extern const unsigned char Xthal_inttype[XTHAL_MAX_INTERRUPTS];
+
+/* masks of each type of interrupt */
+extern const unsigned int Xthal_inttype_mask[XTHAL_MAX_INTTYPES];
+
+/* interrupt numbers assigned to each timer interrupt */
+extern const int Xthal_timer_interrupt[XTHAL_MAX_TIMERS];
+
+/*** Virtual interrupt prioritization: ***/
+
+/* Convert between interrupt levels (as per PS.INTLEVEL) and virtual interrupt priorities: */
+extern unsigned xthal_vpri_to_intlevel(unsigned vpri);
+extern unsigned xthal_intlevel_to_vpri(unsigned intlevel);
+
+/* Enables/disables given set (mask) of interrupts; returns previous enabled-mask of all ints: */
+extern unsigned xthal_int_enable(unsigned);
+extern unsigned xthal_int_disable(unsigned);
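+
+/*
+ * Illustrative usage (an assumption, not part of the HAL declarations):
+ *
+ *	unsigned prev = xthal_int_enable(1 << intnum);	// returns previous enabled-mask
+ *	...
+ *	if (!(prev & (1 << intnum)))
+ *		xthal_int_disable(1 << intnum);		// restore original state
+ */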
+
+/* Set/get virtual priority of an interrupt: */
+extern int xthal_set_int_vpri(int intnum, int vpri);
+extern int xthal_get_int_vpri(int intnum);
+
+/* Set/get interrupt lockout level for exclusive access to virtual priority data structures: */
+extern void xthal_set_vpri_locklevel(unsigned intlevel);
+extern unsigned xthal_get_vpri_locklevel(void);
+
+/* Set/get current virtual interrupt priority: */
+extern unsigned xthal_set_vpri(unsigned vpri);
+extern unsigned xthal_get_vpri(unsigned vpri);
+extern unsigned xthal_set_vpri_intlevel(unsigned intlevel);
+extern unsigned xthal_set_vpri_lock(void);
+
+
+
+/*----------------------------------------------------------------------
+ Generic Interrupt Trampolining Support
+ ----------------------------------------------------------------------*/
+
+typedef void (XtHalVoidFunc)(void);
+
+/*
+ * Bitmask of interrupts currently trampolining down:
+ */
+extern unsigned Xthal_tram_pending;
+
+/*
+ * Bitmask of which interrupts currently trampolining down
+ * synchronously are actually enabled; this bitmask is necessary
+ * because INTENABLE cannot hold that state (sync-trampolining
+ * interrupts must be kept disabled while trampolining);
+ * in the current implementation, any bit set here is not set
+ * in INTENABLE, and vice-versa; once a sync-trampoline is
+ * handled (at level one), its enable bit must be moved from
+ * here to INTENABLE:
+ */
+extern unsigned Xthal_tram_enabled;
+
+/*
+ * Bitmask of interrupts configured for sync trampolining:
+ */
+extern unsigned Xthal_tram_sync;
+
+
+/* Trampoline support functions: */
+extern unsigned xthal_tram_pending_to_service( void );
+extern void xthal_tram_done( unsigned serviced_mask );
+extern int xthal_tram_set_sync( int intnum, int sync );
+extern XtHalVoidFunc* xthal_set_tram_trigger_func( XtHalVoidFunc *trigger_fn );
+
+/* INTENABLE,INTREAD,INTSET,INTCLEAR register access functions: */
+extern unsigned xthal_get_intenable( void );
+extern void xthal_set_intenable( unsigned );
+extern unsigned xthal_get_intread( void );
+extern void xthal_set_intset( unsigned );
+extern void xthal_set_intclear( unsigned );
+
+
+/*----------------------------------------------------------------------
+ Register Windows
+ ----------------------------------------------------------------------*/
+
+/* number of registers in register window */
+extern const unsigned int Xthal_num_aregs;
+extern const unsigned char Xthal_num_aregs_log2;
+
+/* This spills any live register windows (other than the caller's): */
+extern void xthal_window_spill( void );
+
+
+/*----------------------------------------------------------------------
+ Cache
+ ----------------------------------------------------------------------*/
+
+/* size of the cache lines in log2(bytes) */
+extern const unsigned char Xthal_icache_linewidth;
+extern const unsigned char Xthal_dcache_linewidth;
+/* size of the cache lines in bytes */
+extern const unsigned short Xthal_icache_linesize;
+extern const unsigned short Xthal_dcache_linesize;
+/* number of cache sets in log2(lines per way) */
+extern const unsigned char Xthal_icache_setwidth;
+extern const unsigned char Xthal_dcache_setwidth;
+/* cache set associativity (number of ways) */
+extern const unsigned int Xthal_icache_ways;
+extern const unsigned int Xthal_dcache_ways;
+/* size of the caches in bytes (ways * 2^(linewidth + setwidth)) */
+extern const unsigned int Xthal_icache_size;
+extern const unsigned int Xthal_dcache_size;
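The parenthesized formula above can be checked directly against the exported constants; a small sketch for the data cache (the instruction cache is analogous):

/* Recompute the D-cache size from its geometry: ways * 2^(linewidth + setwidth). */
static unsigned dcache_size_from_geometry(void)
{
    return Xthal_dcache_ways << (Xthal_dcache_linewidth + Xthal_dcache_setwidth);
    /* expected to equal Xthal_dcache_size */
}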
+/* cache features */
+extern const unsigned char Xthal_dcache_is_writeback;
+extern const unsigned char Xthal_icache_line_lockable;
+extern const unsigned char Xthal_dcache_line_lockable;
+
+/* cache attribute register control (used by other HAL routines) */
+extern unsigned xthal_get_cacheattr( void );
+extern unsigned xthal_get_icacheattr( void );
+extern unsigned xthal_get_dcacheattr( void );
+extern void xthal_set_cacheattr( unsigned );
+extern void xthal_set_icacheattr( unsigned );
+extern void xthal_set_dcacheattr( unsigned );
+
+/* initialize cache support (must be called once at startup, before all other cache calls) */
+/*extern void xthal_cache_startinit( void );*/
+/* reset caches */
+/*extern void xthal_icache_reset( void );*/
+/*extern void xthal_dcache_reset( void );*/
+/* enable caches */
+extern void xthal_icache_enable( void ); /* DEPRECATED */
+extern void xthal_dcache_enable( void ); /* DEPRECATED */
+/* disable caches */
+extern void xthal_icache_disable( void ); /* DEPRECATED */
+extern void xthal_dcache_disable( void ); /* DEPRECATED */
+
+/* invalidate the caches */
+extern void xthal_icache_all_invalidate( void );
+extern void xthal_dcache_all_invalidate( void );
+extern void xthal_icache_region_invalidate( void *addr, unsigned size );
+extern void xthal_dcache_region_invalidate( void *addr, unsigned size );
+extern void xthal_icache_line_invalidate(void *addr);
+extern void xthal_dcache_line_invalidate(void *addr);
+/* write dirty data back */
+extern void xthal_dcache_all_writeback( void );
+extern void xthal_dcache_region_writeback( void *addr, unsigned size );
+extern void xthal_dcache_line_writeback(void *addr);
+/* write dirty data back and invalidate */
+extern void xthal_dcache_all_writeback_inv( void );
+extern void xthal_dcache_region_writeback_inv( void *addr, unsigned size );
+extern void xthal_dcache_line_writeback_inv(void *addr);
+/* prefetch and lock specified memory range into cache */
+extern void xthal_icache_region_lock( void *addr, unsigned size );
+extern void xthal_dcache_region_lock( void *addr, unsigned size );
+extern void xthal_icache_line_lock(void *addr);
+extern void xthal_dcache_line_lock(void *addr);
+/* unlock from cache */
+extern void xthal_icache_all_unlock( void );
+extern void xthal_dcache_all_unlock( void );
+extern void xthal_icache_region_unlock( void *addr, unsigned size );
+extern void xthal_dcache_region_unlock( void *addr, unsigned size );
+extern void xthal_icache_line_unlock(void *addr);
+extern void xthal_dcache_line_unlock(void *addr);
+
+
+/* sync icache and memory */
+extern void xthal_icache_sync( void );
+/* sync dcache and memory */
+extern void xthal_dcache_sync( void );
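A brief usage note: when handing a buffer to a non-coherent bus master, the writeback-and-invalidate variants are usually the ones wanted. A hypothetical sketch built only on the declarations above:

/* Make 'buf' visible to a device: push dirty lines to memory and drop them
 * from the D-cache.  Before the CPU re-reads data the device wrote, a plain
 * xthal_dcache_region_invalidate(buf, len) would be used instead. */
static void make_buffer_device_visible(void *buf, unsigned len)
{
    xthal_dcache_region_writeback_inv(buf, len);
}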
+
+/*----------------------------------------------------------------------
+ Debug
+ ----------------------------------------------------------------------*/
+
+/* 1 if debug option configured, 0 if not: */
+extern const int Xthal_debug_configured;
+
+/* Number of instruction and data break registers: */
+extern const int Xthal_num_ibreak;
+extern const int Xthal_num_dbreak;
+
+/* Set (plant) and remove software breakpoint, both synchronizing cache: */
+extern unsigned int xthal_set_soft_break(void *addr);
+extern void xthal_remove_soft_break(void *addr, unsigned int);
+
+
+/*----------------------------------------------------------------------
+ Disassembler
+ ----------------------------------------------------------------------*/
+
+/* Max expected size of the return buffer for a disassembled instruction (hint only): */
+#define XTHAL_DISASM_BUFSIZE 80
+
+/* Disassembly option bits for selecting what to return: */
+#define XTHAL_DISASM_OPT_ADDR 0x0001 /* display address */
+#define XTHAL_DISASM_OPT_OPHEX 0x0002 /* display opcode bytes in hex */
+#define XTHAL_DISASM_OPT_OPCODE 0x0004 /* display opcode name (mnemonic) */
+#define XTHAL_DISASM_OPT_PARMS 0x0008 /* display parameters */
+#define XTHAL_DISASM_OPT_ALL 0x0FFF /* display everything */
+
+/* routine to get a string for the disassembled instruction */
+extern int xthal_disassemble( unsigned char *instr_buf, void *tgt_addr,
+ char *buffer, unsigned buflen, unsigned options );
+
+/* routine to get the size of the next instruction. Returns 0 for
+ an illegal instruction */
+extern int xthal_disassemble_size( unsigned char *instr_buf );
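A minimal sketch combining the two routines above to format and step over one instruction. Passing the same pointer as both the instruction buffer and the target address, and the particular option bits chosen, are assumptions made for illustration:

#include <stdio.h>

/* Disassemble the instruction at 'addr'; returns its size in bytes (0 if illegal). */
static int disasm_one(unsigned char *addr)
{
    char buf[XTHAL_DISASM_BUFSIZE];
    int size = xthal_disassemble_size(addr);    /* 0 => illegal instruction */

    if (size == 0)
        return 0;
    xthal_disassemble(addr, addr, buf, sizeof(buf),
                      XTHAL_DISASM_OPT_OPCODE | XTHAL_DISASM_OPT_PARMS);
    printf("%p: %s\n", (void *)addr, buf);
    return size;                                /* advance by this many bytes */
}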
+
+
+/*----------------------------------------------------------------------
+ Core Counter
+ ----------------------------------------------------------------------*/
+
+/* counter info */
+extern const unsigned char Xthal_have_ccount; /* set if CCOUNT register present */
+extern const unsigned char Xthal_num_ccompare; /* number of CCOMPAREn registers */
+
+/* get CCOUNT register (if not present return 0) */
+extern unsigned xthal_get_ccount(void);
+
+/* set and get CCOMPAREn registers (if not present, get returns 0) */
+extern void xthal_set_ccompare(int, unsigned);
+extern unsigned xthal_get_ccompare(int);
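As an illustration, a cycle-based busy wait needs nothing beyond CCOUNT; a sketch, guarding on Xthal_have_ccount as documented above:

/* Spin for roughly 'cycles' CCOUNT ticks; no-op if the core has no CCOUNT. */
static void delay_cycles(unsigned cycles)
{
    unsigned start;

    if (!Xthal_have_ccount)
        return;                        /* xthal_get_ccount() would just return 0 */
    start = xthal_get_ccount();
    while ((unsigned)(xthal_get_ccount() - start) < cycles)
        ;                              /* unsigned subtraction tolerates wrap-around */
}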
+
+
+/*----------------------------------------------------------------------
+ Instruction/Data RAM/ROM Access
+ ----------------------------------------------------------------------*/
+
+extern void* xthal_memcpy(void *dst, const void *src, unsigned len);
+extern void* xthal_bcopy(const void *src, void *dst, unsigned len);
+
+/*----------------------------------------------------------------------
+ MP Synchronization
+ ----------------------------------------------------------------------*/
+extern int xthal_compare_and_set( int *addr, int test_val, int compare_val );
+extern unsigned xthal_get_prid( void );
+
+/*extern const char Xthal_have_s32c1i;*/
+extern const unsigned char Xthal_have_prid;
+
+
+/*----------------------------------------------------------------------
+ Miscellaneous
+ ----------------------------------------------------------------------*/
+
+extern const unsigned int Xthal_release_major;
+extern const unsigned int Xthal_release_minor;
+extern const char * const Xthal_release_name;
+extern const char * const Xthal_release_internal;
+
+extern const unsigned char Xthal_memory_order;
+extern const unsigned char Xthal_have_windowed;
+extern const unsigned char Xthal_have_density;
+extern const unsigned char Xthal_have_booleans;
+extern const unsigned char Xthal_have_loops;
+extern const unsigned char Xthal_have_nsa;
+extern const unsigned char Xthal_have_minmax;
+extern const unsigned char Xthal_have_sext;
+extern const unsigned char Xthal_have_clamps;
+extern const unsigned char Xthal_have_mac16;
+extern const unsigned char Xthal_have_mul16;
+extern const unsigned char Xthal_have_fp;
+extern const unsigned char Xthal_have_speculation;
+extern const unsigned char Xthal_have_exceptions;
+extern const unsigned char Xthal_xea_version;
+extern const unsigned char Xthal_have_interrupts;
+extern const unsigned char Xthal_have_highlevel_interrupts;
+extern const unsigned char Xthal_have_nmi;
+
+extern const unsigned short Xthal_num_writebuffer_entries;
+
+extern const unsigned int Xthal_build_unique_id;
+/* Release info for hardware targeted by software upgrades: */
+extern const unsigned int Xthal_hw_configid0;
+extern const unsigned int Xthal_hw_configid1;
+extern const unsigned int Xthal_hw_release_major;
+extern const unsigned int Xthal_hw_release_minor;
+extern const char * const Xthal_hw_release_name;
+extern const char * const Xthal_hw_release_internal;
+
+
+/* Internal memories... */
+
+extern const unsigned char Xthal_num_instrom;
+extern const unsigned char Xthal_num_instram;
+extern const unsigned char Xthal_num_datarom;
+extern const unsigned char Xthal_num_dataram;
+extern const unsigned char Xthal_num_xlmi;
+extern const unsigned int Xthal_instrom_vaddr[1];
+extern const unsigned int Xthal_instrom_paddr[1];
+extern const unsigned int Xthal_instrom_size [1];
+extern const unsigned int Xthal_instram_vaddr[1];
+extern const unsigned int Xthal_instram_paddr[1];
+extern const unsigned int Xthal_instram_size [1];
+extern const unsigned int Xthal_datarom_vaddr[1];
+extern const unsigned int Xthal_datarom_paddr[1];
+extern const unsigned int Xthal_datarom_size [1];
+extern const unsigned int Xthal_dataram_vaddr[1];
+extern const unsigned int Xthal_dataram_paddr[1];
+extern const unsigned int Xthal_dataram_size [1];
+extern const unsigned int Xthal_xlmi_vaddr[1];
+extern const unsigned int Xthal_xlmi_paddr[1];
+extern const unsigned int Xthal_xlmi_size [1];
+
+
+
+/*----------------------------------------------------------------------
+ Memory Management Unit
+ ----------------------------------------------------------------------*/
+
+extern const unsigned char Xthal_have_spanning_way;
+extern const unsigned char Xthal_have_identity_map;
+extern const unsigned char Xthal_have_mimic_cacheattr;
+extern const unsigned char Xthal_have_xlt_cacheattr;
+extern const unsigned char Xthal_have_cacheattr;
+extern const unsigned char Xthal_have_tlbs;
+
+extern const unsigned char Xthal_mmu_asid_bits; /* 0 .. 8 */
+extern const unsigned char Xthal_mmu_asid_kernel;
+extern const unsigned char Xthal_mmu_rings; /* 1 .. 4 (perhaps 0 if no MMU and/or no protection?) */
+extern const unsigned char Xthal_mmu_ring_bits;
+extern const unsigned char Xthal_mmu_sr_bits;
+extern const unsigned char Xthal_mmu_ca_bits;
+extern const unsigned int Xthal_mmu_max_pte_page_size;
+extern const unsigned int Xthal_mmu_min_pte_page_size;
+
+extern const unsigned char Xthal_itlb_way_bits;
+extern const unsigned char Xthal_itlb_ways;
+extern const unsigned char Xthal_itlb_arf_ways;
+extern const unsigned char Xthal_dtlb_way_bits;
+extern const unsigned char Xthal_dtlb_ways;
+extern const unsigned char Xthal_dtlb_arf_ways;
+
+/* Convert between virtual and physical addresses (through static maps only): */
+/*** WARNING: these two functions may go away in a future release; don't depend on them! ***/
+extern int xthal_static_v2p( unsigned vaddr, unsigned *paddrp );
+extern int xthal_static_p2v( unsigned paddr, unsigned *vaddrp, unsigned cached );
+
+#if 0
+/******************* EXPERIMENTAL AND TENTATIVE ONLY ********************/
+
+#define XTHAL_MMU_PAGESZ_COUNT_MAX 8 /* maximum number of different page sizes */
+extern const char Xthal_mmu_pagesz_count; /* 0 .. 8 number of different page sizes configured */
+
+/* Note: the following table doesn't necessarily have page sizes in increasing order: */
+extern const char Xthal_mmu_pagesz_log2[XTHAL_MMU_PAGESZ_COUNT_MAX]; /* 10 .. 28 (0 past count) */
+
+/* Sorted (increasing) table of page sizes, that indexes into the above table: */
+extern const char Xthal_mmu_pagesz_sorted[XTHAL_MMU_PAGESZ_COUNT_MAX]; /* 0 .. 7 (0 past count) */
+
+/*u32 Xthal_virtual_exceptions;*/ /* bitmask of which exceptions execute in virtual mode... */
+
+extern const char Xthal_mmu_pte_pagesz_log2_min; /* ?? minimum page size in PTEs */
+extern const char Xthal_mmu_pte_pagesz_log2_max; /* ?? maximum page size in PTEs */
+
+/* Cache Attribute Bits Implemented by the Cache (part of the cache abstraction) */
+extern const char Xthal_icache_fca_bits_implemented; /* ITLB/UTLB only! */
+extern const char Xthal_dcache_lca_bits_implemented; /* DTLB/UTLB only! */
+extern const char Xthal_dcache_sca_bits_implemented; /* DTLB/UTLB only! */
+
+/* Per TLB Parameters (Instruction, Data, Unified) */
+struct XtHalMmuTlb Xthal_itlb; /* description of MMU I-TLB generic features */
+struct XtHalMmuTlb Xthal_dtlb; /* description of MMU D-TLB generic features */
+struct XtHalMmuTlb Xthal_utlb; /* description of MMU U-TLB generic features */
+
+#define XTHAL_MMU_WAYS_MAX 8 /* maximum number of ways (associativities) for each TLB */
+
+/* Structure for common information described for each possible TLB (instruction, data and unified): */
+typedef struct XtHalMmuTlb {
+ u8 va_bits; /* 32 (number of virtual address bits) */
+ u8 pa_bits; /* 32 (number of physical address bits) */
+ bool tlb_va_indexed; /* 1 (set if TLB is indexed by virtual address) */
+ bool tlb_va_tagged; /* 0 (set if TLB is tagged by virtual address) */
+ bool cache_va_indexed; /* 1 (set if cache is indexed by virtual address) */
+ bool cache_va_tagged; /* 0 (set if cache is tagged by virtual address) */
+ /*bool (whether page tables are traversed in vaddr sorted order, paddr sorted order, ...) */
+ /*u8 (set of available page attribute bits, other than cache attribute bits defined above) */
+ /*u32 (various masks for pages, MMU table/TLB entries, etc.) */
+ u8 way_count; /* 0 .. 8 (number of ways, a.k.a. associativities, for this TLB) */
+ XtHalMmuTlbWay * ways[XTHAL_MMU_WAYS_MAX]; /* pointers to per-way parms for each way */
+} XtHalMmuTlb;
+
+/* Per TLB Way (Per Associativity) Parameters */
+typedef struct XtHalMmuTlbWay {
+ u32 index_count_log2; /* 0 .. 4 */
+ u32 pagesz_mask; /* 0 .. 2^pagesz_count - 1 (each bit corresponds to a size */
+ /* defined in the Xthal_mmu_pagesz_log2[] table) */
+ u32 vpn_const_mask;
+ u32 vpn_const_value;
+ u64 ppn_const_mask; /* future may support pa_bits > 32 */
+ u64 ppn_const_value;
+ u32 ppn_id_mask; /* paddr bits taken directly from vaddr */
+ bool backgnd_match; /* 0 or 1 */
+ /* These are defined in terms of the XTHAL_CACHE_xxx bits: */
+ u8 fca_const_mask; /* ITLB/UTLB only! */
+ u8 fca_const_value; /* ITLB/UTLB only! */
+ u8 lca_const_mask; /* DTLB/UTLB only! */
+ u8 lca_const_value; /* DTLB/UTLB only! */
+ u8 sca_const_mask; /* DTLB/UTLB only! */
+ u8 sca_const_value; /* DTLB/UTLB only! */
+ /* These define an encoding that maps 5 bits in TLB and PTE entries to */
+ /* 8 bits (FCA, ITLB), 16 bits (LCA+SCA, DTLB) or 24 bits (FCA+LCA+SCA, UTLB): */
+ /* (they may be moved to struct XtHalMmuTlb) */
+ u8 ca_bits; /* number of bits in TLB/PTE entries for cache attributes */
+ u32 * ca_map; /* pointer to array of 2^ca_bits entries of FCA+LCA+SCA bits */
+} XtHalMmuTlbWay;
+
+/*
+ * The way to determine whether protection support is present in core
+ * is to [look at Xthal_mmu_rings ???].
+ * Give info on memory requirements for MMU tables and other in-memory
+ * data structures (globally, per task, base and per page, etc.) - whatever bounds can be calculated.
+ */
+
+
+/* Default vectors: */
+xthal_immu_fetch_miss_vector
+xthal_dmmu_load_miss_vector
+xthal_dmmu_store_miss_vector
+
+/* Functions called when a fault is detected: */
+typedef void (XtHalMmuFaultFunc)( unsigned vaddr, ...context... );
+/* Or, */
+/* a? = vaddr */
+/* a? = context... */
+/* PS.xxx = xxx */
+XtHalMmuFaultFunc *Xthal_immu_fetch_fault_func;
+XtHalMmuFaultFunc *Xthal_dmmu_load_fault_func;
+XtHalMmuFaultFunc *Xthal_dmmu_store_fault_func;
+
+/* Default Handlers: */
+/* The user and/or kernel exception handlers may jump to these handlers to handle the relevant exceptions,
+ * according to the value of EXCCAUSE. The exact register state on entry to these handlers is TBD. */
+/* When multiple TLB entries match (hit) on the same access: */
+xthal_immu_fetch_multihit_handler
+xthal_dmmu_load_multihit_handler
+xthal_dmmu_store_multihit_handler
+/* Protection violations according to cache attributes, and other cache attribute mismatches: */
+xthal_immu_fetch_attr_handler
+xthal_dmmu_load_attr_handler
+xthal_dmmu_store_attr_handler
+/* Protection violations due to insufficient ring level: */
+xthal_immu_fetch_priv_handler
+xthal_dmmu_load_priv_handler
+xthal_dmmu_store_priv_handler
+/* Alignment exception handlers (if supported by the particular Xtensa MMU configuration): */
+xthal_dmmu_load_align_handler
+xthal_dmmu_store_align_handler
+
+/* Or, alternatively, the OS user and/or kernel exception handlers may simply jump to the
+ * following entry points which will handle any values of EXCCAUSE not handled by the OS: */
+xthal_user_exc_default_handler
+xthal_kernel_exc_default_handler
+
+#endif /*0*/
+
+#ifdef INCLUDE_DEPRECATED_HAL_CODE
+extern const unsigned char Xthal_have_old_exc_arch;
+extern const unsigned char Xthal_have_mmu;
+extern const unsigned int Xthal_num_regs;
+extern const unsigned char Xthal_num_iroms;
+extern const unsigned char Xthal_num_irams;
+extern const unsigned char Xthal_num_droms;
+extern const unsigned char Xthal_num_drams;
+extern const unsigned int Xthal_configid0;
+extern const unsigned int Xthal_configid1;
+#endif
+
+#ifdef INCLUDE_DEPRECATED_HAL_DEBUG_CODE
+#define XTHAL_24_BIT_BREAK 0x80000000
+#define XTHAL_16_BIT_BREAK 0x40000000
+extern const unsigned short Xthal_ill_inst_16[16];
+#define XTHAL_DEST_REG 0xf0000000 /* Mask for destination register */
+#define XTHAL_DEST_REG_INST 0x08000000 /* Branch address is in register */
+#define XTHAL_DEST_REL_INST 0x04000000 /* Branch address is relative */
+#define XTHAL_RFW_INST 0x00000800
+#define XTHAL_RFUE_INST 0x00000400
+#define XTHAL_RFI_INST 0x00000200
+#define XTHAL_RFE_INST 0x00000100
+#define XTHAL_RET_INST 0x00000080
+#define XTHAL_BREAK_INST 0x00000040
+#define XTHAL_SYSCALL_INST 0x00000020
+#define XTHAL_LOOP_END 0x00000010 /* Not set by xthal_inst_type */
+#define XTHAL_JUMP_INST 0x00000008 /* Call or jump instruction */
+#define XTHAL_BRANCH_INST 0x00000004 /* Branch instruction */
+#define XTHAL_24_BIT_INST 0x00000002
+#define XTHAL_16_BIT_INST 0x00000001
+typedef struct xthal_state {
+ unsigned pc;
+ unsigned ar[16];
+ unsigned lbeg;
+ unsigned lend;
+ unsigned lcount;
+ unsigned extra_ptr;
+ unsigned cpregs_ptr[XTHAL_MAX_CPS];
+} XTHAL_STATE;
+extern unsigned int xthal_inst_type(void *addr);
+extern unsigned int xthal_branch_addr(void *addr);
+extern unsigned int xthal_get_npc(XTHAL_STATE *user_state);
+#endif /* INCLUDE_DEPRECATED_HAL_DEBUG_CODE */
+
+#ifdef __cplusplus
+}
+#endif
+#endif /*!__ASSEMBLY__ */
+
+#endif /*XTENSA_HAL_H*/
+
diff --git a/include/asm-xtensa/xtensa/simcall.h b/include/asm-xtensa/xtensa/simcall.h
new file mode 100644
index 00000000000..a2b868929a4
--- /dev/null
+++ b/include/asm-xtensa/xtensa/simcall.h
@@ -0,0 +1,130 @@
+#ifndef SIMCALL_INCLUDED
+#define SIMCALL_INCLUDED
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * include/asm-xtensa/xtensa/simcall.h - Simulator call numbers
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+/*
+ * System call like services offered by the simulator host.
+ * These are modeled after the Linux 2.4 kernel system calls
+ * for Xtensa processors. However not all system calls and
+ * not all functionality of a given system call are implemented,
+ * or necessarily have well defined or equivalent semantics in
+ * the context of a simulation (as opposed to a Unix kernel).
+ *
+ * These services behave largely as if they had been invoked
+ * as a task in the simulator host's operating system
+ * (eg. files accessed are those of the simulator host).
+ * However, these SIMCALLs model a virtual operating system
+ * so that various definitions, bit assignments etc
+ * (eg. open mode bits, errno values, etc) are independent
+ * of the host operating system used to run the simulation.
+ * Rather these definitions are specific to the Xtensa ISS.
+ * This way Xtensa ISA code written to use these SIMCALLs
+ * can (in principle) be simulated on any host.
+ *
+ * Up to 6 parameters are passed in registers a3 to a8
+ * (note the 6th parameter isn't passed on the stack,
+ * unlike windowed function calling conventions).
+ * The return value is in a2. A negative value in the
+ * range -4096 to -1 indicates a negated error code to be
+ * reported in errno with a return value of -1, otherwise
+ * the value in a2 is returned as is.
+ */
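The return-value convention above maps directly onto the usual (-1, errno) style. A hypothetical helper, not part of this header, that performs that mapping:

/* Fold a raw SIMCALL result (the value left in a2) into -1/errno form. */
static inline int simcall_result(int a2_value, int *errno_out)
{
    if (a2_value < 0 && a2_value >= -4096) {
        *errno_out = -a2_value;        /* negated error code */
        return -1;
    }
    return a2_value;                   /* otherwise returned as is */
}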
+
+/* These #defines need to match what's in Xtensa/OS/vxworks/xtiss/simcalls.c */
+
+#define SYS_nop 0 /* n/a - setup; used to flush register windows */
+#define SYS_exit 1 /*x*/
+#define SYS_fork 2
+#define SYS_read 3 /*x*/
+#define SYS_write 4 /*x*/
+#define SYS_open 5 /*x*/
+#define SYS_close 6 /*x*/
+#define SYS_rename 7 /*x 38 - waitpid */
+#define SYS_creat 8 /*x*/
+#define SYS_link 9 /*x (not implemented on WIN32) */
+#define SYS_unlink 10 /*x*/
+#define SYS_execv 11 /* n/a - execve */
+#define SYS_execve 12 /* 11 - chdir */
+#define SYS_pipe 13 /* 42 - time */
+#define SYS_stat 14 /* 106 - mknod */
+#define SYS_chmod 15
+#define SYS_chown 16 /* 202 - lchown */
+#define SYS_utime 17 /* 30 - break */
+#define SYS_wait 18 /* n/a - oldstat */
+#define SYS_lseek 19 /*x*/
+#define SYS_getpid 20
+#define SYS_isatty 21 /* n/a - mount */
+#define SYS_fstat 22 /* 108 - oldumount */
+#define SYS_time 23 /* 13 - setuid */
+#define SYS_gettimeofday 24 /*x 78 - getuid (not implemented on WIN32) */
+#define SYS_times 25 /*X 43 - stime (Xtensa-specific implementation) */
+#define SYS_socket 26
+#define SYS_sendto 27
+#define SYS_recvfrom 28
+#define SYS_select_one 29 /* not a compatible select; one file descriptor at a time */
+#define SYS_bind 30
+#define SYS_ioctl 31
+
+/*
+ * Other...
+ */
+#define SYS_iss_argc 1000 /* returns value of argc */
+#define SYS_iss_argv_size 1001 /* bytes needed for argv & arg strings */
+#define SYS_iss_set_argv 1002 /* saves argv & arg strings at given addr */
+
+/*
+ * SIMCALLs for the ferret memory debugger. All are invoked by
+ * libferret.a ... ( Xtensa/Target-Libs/ferret )
+ */
+#define SYS_ferret 1010
+#define SYS_malloc 1011
+#define SYS_free 1012
+#define SYS_more_heap 1013
+#define SYS_no_heap 1014
+
+
+/*
+ * Extra SIMCALLs for GDB:
+ */
+#define SYS_gdb_break -1 /* invoked by XTOS on user exceptions if EPC points
+ to a break.n/break, regardless of cause! */
+#define SYS_xmon_out -2 /* invoked by XMON: ... */
+#define SYS_xmon_in -3 /* invoked by XMON: ... */
+#define SYS_xmon_flush -4 /* invoked by XMON: ... */
+#define SYS_gdb_abort -5 /* invoked by XTOS in _xtos_panic() */
+#define SYS_gdb_illegal_inst -6 /* invoked by XTOS for illegal instructions (too deeply) */
+#define SYS_xmon_init -7 /* invoked by XMON: ... */
+#define SYS_gdb_enter_sktloop -8 /* invoked by XTOS on debug exceptions */
+
+/*
+ * SIMCALLs for vxWorks xtiss BSP:
+ */
+#define SYS_setup_ppp_pipes -83
+#define SYS_log_msg -84
+
+/*
+ * Test SIMCALLs:
+ */
+#define SYS_test_write_state -100
+#define SYS_test_read_state -101
+
+/*
+ * SYS_select_one specifiers
+ */
+#define XTISS_SELECT_ONE_READ 1
+#define XTISS_SELECT_ONE_WRITE 2
+#define XTISS_SELECT_ONE_EXCEPT 3
+
+#endif /* !SIMCALL_INCLUDED */
diff --git a/include/asm-xtensa/xtensa/xt2000-uart.h b/include/asm-xtensa/xtensa/xt2000-uart.h
new file mode 100644
index 00000000000..0154460f0ed
--- /dev/null
+++ b/include/asm-xtensa/xtensa/xt2000-uart.h
@@ -0,0 +1,155 @@
+#ifndef _uart_h_included_
+#define _uart_h_included_
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * include/asm-xtensa/xtensa/xt2000-uart.h -- NatSemi PC16552D DUART
+ * definitions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+#include <xtensa/xt2000.h>
+
+
+/* 16550 UART DEVICE REGISTERS
+ The XT2000 board aligns each register to a 32-bit word but the UART device only uses
+ one byte of the word, which is the least-significant byte regardless of the
+ endianness of the core (ie. byte offset 0 for little-endian and 3 for big-endian).
+ So if using word accesses then endianness doesn't matter.
+ So when using word accesses, endianness doesn't matter.
+*/
+struct uart_dev_s {
+ union {
+ unsigned int rxb; /* DLAB=0: receive buffer, read-only */
+ unsigned int txb; /* DLAB=0: transmit buffer, write-only */
+ unsigned int dll; /* DLAB=1: divisor, least-significant byte latch (was write-only?) */
+ } w0;
+ union {
+ unsigned int ier; /* DLAB=0: interrupt-enable register (was write-only?) */
+ unsigned int dlm; /* DLAB=1: divisor, most-significant byte latch (was write-only?) */
+ } w1;
+
+ union {
+ unsigned int isr; /* DLAB=0: interrupt status register, read-only */
+ unsigned int fcr; /* DLAB=0: FIFO control register, write-only */
+ unsigned int afr; /* DLAB=1: alternate function register */
+ } w2;
+
+ unsigned int lcr; /* line control register, write-only */
+ unsigned int mcr; /* modem control register, write-only */
+ unsigned int lsr; /* line status register, read-only */
+ unsigned int msr; /* modem status register, read-only */
+ unsigned int scr; /* scratch register, read/write */
+};
+
+#define _RXB(u) ((u)->w0.rxb)
+#define _TXB(u) ((u)->w0.txb)
+#define _DLL(u) ((u)->w0.dll)
+#define _IER(u) ((u)->w1.ier)
+#define _DLM(u) ((u)->w1.dlm)
+#define _ISR(u) ((u)->w2.isr)
+#define _FCR(u) ((u)->w2.fcr)
+#define _AFR(u) ((u)->w2.afr)
+#define _LCR(u) ((u)->lcr)
+#define _MCR(u) ((u)->mcr)
+#define _LSR(u) ((u)->lsr)
+#define _MSR(u) ((u)->msr)
+#define _SCR(u) ((u)->scr)
+
+typedef volatile struct uart_dev_s uart_dev_t;
+
+/* IER bits */
+#define RCVR_DATA_REG_INTENABLE 0x01
+#define XMIT_HOLD_REG_INTENABLE 0x02
+#define RCVR_STATUS_INTENABLE 0x04
+#define MODEM_STATUS_INTENABLE 0x08
+
+/* FCR bits */
+#define _FIFO_ENABLE 0x01
+#define RCVR_FIFO_RESET 0x02
+#define XMIT_FIFO_RESET 0x04
+#define DMA_MODE_SELECT 0x08
+#define RCVR_TRIGGER_LSB 0x40
+#define RCVR_TRIGGER_MSB 0x80
+
+/* AFR bits */
+#define AFR_CONC_WRITE 0x01
+#define AFR_BAUDOUT_SEL 0x02
+#define AFR_RXRDY_SEL 0x04
+
+/* ISR bits */
+#define INT_STATUS(r) ((r)&1)
+#define INT_PRIORITY(r) (((r)>>1)&0x7)
+
+/* LCR bits */
+#define WORD_LENGTH(n) (((n)-5)&0x3)
+#define STOP_BIT_ENABLE 0x04
+#define PARITY_ENABLE 0x08
+#define EVEN_PARITY 0x10
+#define FORCE_PARITY 0x20
+#define XMIT_BREAK 0x40
+#define DLAB_ENABLE 0x80
+
+/* MCR bits */
+#define _DTR 0x01
+#define _RTS 0x02
+#define _OP1 0x04
+#define _OP2 0x08
+#define LOOP_BACK 0x10
+
+/* LSR Bits */
+#define RCVR_DATA_READY 0x01
+#define OVERRUN_ERROR 0x02
+#define PARITY_ERROR 0x04
+#define FRAMING_ERROR 0x08
+#define BREAK_INTERRUPT 0x10
+#define XMIT_HOLD_EMPTY 0x20
+#define XMIT_EMPTY 0x40
+#define FIFO_ERROR 0x80
+#define RCVR_READY(u) (_LSR(u)&RCVR_DATA_READY)
+#define XMIT_READY(u) (_LSR(u)&XMIT_HOLD_EMPTY)
+
+/* MSR bits */
+#define _RDR 0x01
+#define DELTA_DSR 0x02
+#define DELTA_RI 0x04
+#define DELTA_CD 0x08
+#define _CTS 0x10
+#define _DSR 0x20
+#define _RI 0x40
+#define _CD 0x80
+
+/* prototypes */
+void uart_init( uart_dev_t *u, int bitrate );
+void uart_out( uart_dev_t *u, char c );
+void uart_puts( uart_dev_t *u, char *s );
+char uart_in( uart_dev_t *u );
+void uart_enable_rcvr_int( uart_dev_t *u );
+void uart_disable_rcvr_int( uart_dev_t *u );
+
+#ifdef DUART16552_1_VADDR
+/* DUART present. */
+#define DUART_1_BASE (*(uart_dev_t*)DUART16552_1_VADDR)
+#define DUART_2_BASE (*(uart_dev_t*)DUART16552_2_VADDR)
+#define UART1_PUTS(s) uart_puts( &DUART_1_BASE, s )
+#define UART2_PUTS(s) uart_puts( &DUART_2_BASE, s )
+#else
+/* DUART not configured, use dummy placeholders to allow compiles to work. */
+#define DUART_1_BASE (*(uart_dev_t*)0)
+#define DUART_2_BASE (*(uart_dev_t*)0)
+#define UART1_PUTS(s)
+#define UART2_PUTS(s)
+#endif
+
+/* Compute 16-bit divisor for baudrate generator, with rounding: */
+#define DUART_DIVISOR(crystal,speed) (((crystal)/16 + (speed)/2)/(speed))
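A worked example of the macros and the divisor formula above. The polled-output routine is only a sketch, not the board-support implementation behind the uart_* prototypes; the 18.432 MHz figure is the XT2000 crystal frequency defined in xt2000.h:

/* Emit one character by polling the line status register via 32-bit accesses. */
static void uart_putc_polled(uart_dev_t *u, char c)
{
    while (!XMIT_READY(u))
        ;                               /* wait for transmit holding register empty */
    _TXB(u) = (unsigned char)c;         /* word store; only the low byte reaches the UART */
}

/* Divisor for 38400 baud: DUART_DIVISOR(18432000, 38400) evaluates to 30. */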
+
+#endif /*_uart_h_included_*/
+
diff --git a/include/asm-xtensa/xtensa/xt2000.h b/include/asm-xtensa/xtensa/xt2000.h
new file mode 100644
index 00000000000..703a45002f8
--- /dev/null
+++ b/include/asm-xtensa/xtensa/xt2000.h
@@ -0,0 +1,408 @@
+#ifndef _INC_XT2000_H_
+#define _INC_XT2000_H_
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * include/asm-xtensa/xtensa/xt2000.h - Definitions specific to the
+ * Tensilica XT2000 Emulation Board
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+#include <xtensa/config/core.h>
+#include <xtensa/config/system.h>
+
+
+/*
+ * Default assignment of XT2000 devices to external interrupts.
+ */
+
+/* Ethernet interrupt: */
+#ifdef XCHAL_EXTINT3_NUM
+#define SONIC83934_INTNUM XCHAL_EXTINT3_NUM
+#define SONIC83934_INTLEVEL XCHAL_EXTINT3_LEVEL
+#define SONIC83934_INTMASK XCHAL_EXTINT3_MASK
+#else
+#define SONIC83934_INTMASK 0
+#endif
+
+/* DUART channel 1 interrupt (P1 - console): */
+#ifdef XCHAL_EXTINT4_NUM
+#define DUART16552_1_INTNUM XCHAL_EXTINT4_NUM
+#define DUART16552_1_INTLEVEL XCHAL_EXTINT4_LEVEL
+#define DUART16552_1_INTMASK XCHAL_EXTINT4_MASK
+#else
+#define DUART16552_1_INTMASK 0
+#endif
+
+/* DUART channel 2 interrupt (P2 - 2nd serial port): */
+#ifdef XCHAL_EXTINT5_NUM
+#define DUART16552_2_INTNUM XCHAL_EXTINT5_NUM
+#define DUART16552_2_INTLEVEL XCHAL_EXTINT5_LEVEL
+#define DUART16552_2_INTMASK XCHAL_EXTINT5_MASK
+#else
+#define DUART16552_2_INTMASK 0
+#endif
+
+/* FPGA-combined PCI/etc interrupts: */
+#ifdef XCHAL_EXTINT6_NUM
+#define XT2000_FPGAPCI_INTNUM XCHAL_EXTINT6_NUM
+#define XT2000_FPGAPCI_INTLEVEL XCHAL_EXTINT6_LEVEL
+#define XT2000_FPGAPCI_INTMASK XCHAL_EXTINT6_MASK
+#else
+#define XT2000_FPGAPCI_INTMASK 0
+#endif
+
+
+
+/*
+ * Device addresses.
+ *
+ * Note: for endianness-independence, use 32-bit loads and stores for all
+ * register accesses to Ethernet, DUART and LED devices.  Undefined bits
+ * may need to be masked out when reading if the actual register size
+ * is smaller than 32 bits.
+ *
+ * Note: XT2000 bus byte lanes are defined in terms of msbyte and lsbyte
+ * relative to the processor. So 32-bit registers are accessed consistently
+ * from both big and little endian processors. However, this means byte
+ * sequences are not consistent between big and little endian processors.
+ * This is fine for RAM, and for ROM if ROM is created for a specific
+ * processor (and thus has correct byte sequences). However this may be
+ * unexpected for Flash, which might contain a file-system that one wants
+ * to use for multiple processor configurations (eg. the Flash might contain
+ * the Ethernet card's address, endianness-independent application data, etc).
+ * That is, byte sequences written in Flash by a core of a given endianness
+ * will be byte-swapped when seen by a core of the other endianness.
+ * Someone implementing an endianness-independent Flash file system will
+ * likely handle this byte-swapping issue in the Flash driver software.
+ */
+
+#define DUART16552_XTAL_FREQ 18432000 /* crystal frequency in Hz */
+#define XTBOARD_FLASH_MAXSIZE 0x4000000 /* 64 MB (max; depends on what is socketed!) */
+#define XTBOARD_EPROM_MAXSIZE 0x0400000 /* 4 MB (max; depends on what is socketed!) */
+#define XTBOARD_EEPROM_MAXSIZE 0x0080000 /* 512 kB (max; depends on what is socketed!) */
+#define XTBOARD_ASRAM_SIZE 0x0100000 /* 1 MB */
+#define XTBOARD_PCI_MEM_SIZE 0x8000000 /* 128 MB (allocated) */
+#define XTBOARD_PCI_IO_SIZE 0x1000000 /* 16 MB (allocated) */
+
+#ifdef XSHAL_IOBLOCK_BYPASS_PADDR
+/* PCI memory space: */
+# define XTBOARD_PCI_MEM_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0x0000000)
+/* Socketed Flash (eg. 2 x 16-bit devices): */
+# define XTBOARD_FLASH_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0x8000000)
+/* PCI I/O space: */
+# define XTBOARD_PCI_IO_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xC000000)
+/* V3 PCI interface chip register/config space: */
+# define XTBOARD_V3PCI_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD000000)
+/* Bus Interface registers: */
+# define XTBOARD_BUSINT_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD010000)
+/* FPGA registers: */
+# define XT2000_FPGAREGS_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD020000)
+/* SONIC SN83934 Ethernet controller/transceiver: */
+# define SONIC83934_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD030000)
+/* 8-character bitmapped LED display: */
+# define XTBOARD_LED_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD040000)
+/* National-Semi PC16552D DUART: */
+# define DUART16552_1_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD050020) /* channel 1 (P1 - console) */
+# define DUART16552_2_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD050000) /* channel 2 (P2) */
+/* Asynchronous Static RAM: */
+# define XTBOARD_ASRAM_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD400000)
+/* 8-bit EEPROM: */
+# define XTBOARD_EEPROM_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD600000)
+/* 2 x 16-bit EPROMs: */
+# define XTBOARD_EPROM_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD800000)
+#endif /* XSHAL_IOBLOCK_BYPASS_PADDR */
+
+/* These devices might be accessed cached: */
+#ifdef XSHAL_IOBLOCK_CACHED_PADDR
+# define XTBOARD_PCI_MEM_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0x0000000)
+# define XTBOARD_FLASH_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0x8000000)
+# define XTBOARD_ASRAM_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0xD400000)
+# define XTBOARD_EEPROM_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0xD600000)
+# define XTBOARD_EPROM_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0xD800000)
+#endif /* XSHAL_IOBLOCK_CACHED_PADDR */
+
+
+/*** Same thing over again, this time with virtual addresses: ***/
+
+#ifdef XSHAL_IOBLOCK_BYPASS_VADDR
+/* PCI memory space: */
+# define XTBOARD_PCI_MEM_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0x0000000)
+/* Socketed Flash (eg. 2 x 16-bit devices): */
+# define XTBOARD_FLASH_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0x8000000)
+/* PCI I/O space: */
+# define XTBOARD_PCI_IO_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xC000000)
+/* V3 PCI interface chip register/config space: */
+# define XTBOARD_V3PCI_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD000000)
+/* Bus Interface registers: */
+# define XTBOARD_BUSINT_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD010000)
+/* FPGA registers: */
+# define XT2000_FPGAREGS_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD020000)
+/* SONIC SN83934 Ethernet controller/transceiver: */
+# define SONIC83934_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD030000)
+/* 8-character bitmapped LED display: */
+# define XTBOARD_LED_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD040000)
+/* National-Semi PC16552D DUART: */
+# define DUART16552_1_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD050020) /* channel 1 (P1 - console) */
+# define DUART16552_2_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD050000) /* channel 2 (P2) */
+/* Asynchronous Static RAM: */
+# define XTBOARD_ASRAM_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD400000)
+/* 8-bit EEPROM: */
+# define XTBOARD_EEPROM_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD600000)
+/* 2 x 16-bit EPROMs: */
+# define XTBOARD_EPROM_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD800000)
+#endif /* XSHAL_IOBLOCK_BYPASS_VADDR */
+
+/* These devices might be accessed cached: */
+#ifdef XSHAL_IOBLOCK_CACHED_VADDR
+# define XTBOARD_PCI_MEM_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0x0000000)
+# define XTBOARD_FLASH_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0x8000000)
+# define XTBOARD_ASRAM_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0xD400000)
+# define XTBOARD_EEPROM_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0xD600000)
+# define XTBOARD_EPROM_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0xD800000)
+#endif /* XSHAL_IOBLOCK_CACHED_VADDR */
+
+
+/* System ROM: */
+#define XTBOARD_ROM_SIZE XSHAL_ROM_SIZE
+#ifdef XSHAL_ROM_VADDR
+#define XTBOARD_ROM_VADDR XSHAL_ROM_VADDR
+#endif
+#ifdef XSHAL_ROM_PADDR
+#define XTBOARD_ROM_PADDR XSHAL_ROM_PADDR
+#endif
+
+/* System RAM: */
+#define XTBOARD_RAM_SIZE XSHAL_RAM_SIZE
+#ifdef XSHAL_RAM_VADDR
+#define XTBOARD_RAM_VADDR XSHAL_RAM_VADDR
+#endif
+#ifdef XSHAL_RAM_PADDR
+#define XTBOARD_RAM_PADDR XSHAL_RAM_PADDR
+#endif
+#define XTBOARD_RAM_BYPASS_VADDR XSHAL_RAM_BYPASS_VADDR
+#define XTBOARD_RAM_BYPASS_PADDR XSHAL_RAM_BYPASS_PADDR
+
+
+
+/*
+ * Things that depend on device addresses.
+ */
+
+
+#define XTBOARD_CACHEATTR_WRITEBACK XSHAL_XT2000_CACHEATTR_WRITEBACK
+#define XTBOARD_CACHEATTR_WRITEALLOC XSHAL_XT2000_CACHEATTR_WRITEALLOC
+#define XTBOARD_CACHEATTR_WRITETHRU XSHAL_XT2000_CACHEATTR_WRITETHRU
+#define XTBOARD_CACHEATTR_BYPASS XSHAL_XT2000_CACHEATTR_BYPASS
+#define XTBOARD_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_DEFAULT
+
+#define XTBOARD_BUSINT_PIPE_REGIONS XSHAL_XT2000_PIPE_REGIONS
+#define XTBOARD_BUSINT_SDRAM_REGIONS XSHAL_XT2000_SDRAM_REGIONS
+
+
+
+/*
+ * BusLogic (FPGA) registers.
+ * All these registers are normally accessed using 32-bit loads/stores.
+ */
+
+/* Register offsets: */
+#define XT2000_DATECD_OFS 0x00 /* date code (read-only) */
+#define XT2000_STSREG_OFS 0x04 /* status (read-only) */
+#define XT2000_SYSLED_OFS 0x08 /* system LED */
+#define XT2000_WRPROT_OFS 0x0C /* write protect */
+#define XT2000_SWRST_OFS 0x10 /* software reset */
+#define XT2000_SYSRST_OFS 0x14 /* system (peripherals) reset */
+#define XT2000_IMASK_OFS 0x18 /* interrupt mask */
+#define XT2000_ISTAT_OFS 0x1C /* interrupt status */
+#define XT2000_V3CFG_OFS 0x20 /* V3 config (V320 PCI) */
+
+/* Physical register addresses: */
+#ifdef XT2000_FPGAREGS_PADDR
+#define XT2000_DATECD_PADDR (XT2000_FPGAREGS_PADDR+XT2000_DATECD_OFS)
+#define XT2000_STSREG_PADDR (XT2000_FPGAREGS_PADDR+XT2000_STSREG_OFS)
+#define XT2000_SYSLED_PADDR (XT2000_FPGAREGS_PADDR+XT2000_SYSLED_OFS)
+#define XT2000_WRPROT_PADDR (XT2000_FPGAREGS_PADDR+XT2000_WRPROT_OFS)
+#define XT2000_SWRST_PADDR (XT2000_FPGAREGS_PADDR+XT2000_SWRST_OFS)
+#define XT2000_SYSRST_PADDR (XT2000_FPGAREGS_PADDR+XT2000_SYSRST_OFS)
+#define XT2000_IMASK_PADDR (XT2000_FPGAREGS_PADDR+XT2000_IMASK_OFS)
+#define XT2000_ISTAT_PADDR (XT2000_FPGAREGS_PADDR+XT2000_ISTAT_OFS)
+#define XT2000_V3CFG_PADDR (XT2000_FPGAREGS_PADDR+XT2000_V3CFG_OFS)
+#endif
+
+/* Virtual register addresses: */
+#ifdef XT2000_FPGAREGS_VADDR
+#define XT2000_DATECD_VADDR (XT2000_FPGAREGS_VADDR+XT2000_DATECD_OFS)
+#define XT2000_STSREG_VADDR (XT2000_FPGAREGS_VADDR+XT2000_STSREG_OFS)
+#define XT2000_SYSLED_VADDR (XT2000_FPGAREGS_VADDR+XT2000_SYSLED_OFS)
+#define XT2000_WRPROT_VADDR (XT2000_FPGAREGS_VADDR+XT2000_WRPROT_OFS)
+#define XT2000_SWRST_VADDR (XT2000_FPGAREGS_VADDR+XT2000_SWRST_OFS)
+#define XT2000_SYSRST_VADDR (XT2000_FPGAREGS_VADDR+XT2000_SYSRST_OFS)
+#define XT2000_IMASK_VADDR (XT2000_FPGAREGS_VADDR+XT2000_IMASK_OFS)
+#define XT2000_ISTAT_VADDR (XT2000_FPGAREGS_VADDR+XT2000_ISTAT_OFS)
+#define XT2000_V3CFG_VADDR (XT2000_FPGAREGS_VADDR+XT2000_V3CFG_OFS)
+/* Register access (for C code): */
+#define XT2000_DATECD_REG (*(volatile unsigned*) XT2000_DATECD_VADDR)
+#define XT2000_STSREG_REG (*(volatile unsigned*) XT2000_STSREG_VADDR)
+#define XT2000_SYSLED_REG (*(volatile unsigned*) XT2000_SYSLED_VADDR)
+#define XT2000_WRPROT_REG (*(volatile unsigned*) XT2000_WRPROT_VADDR)
+#define XT2000_SWRST_REG (*(volatile unsigned*) XT2000_SWRST_VADDR)
+#define XT2000_SYSRST_REG (*(volatile unsigned*) XT2000_SYSRST_VADDR)
+#define XT2000_IMASK_REG (*(volatile unsigned*) XT2000_IMASK_VADDR)
+#define XT2000_ISTAT_REG (*(volatile unsigned*) XT2000_ISTAT_VADDR)
+#define XT2000_V3CFG_REG (*(volatile unsigned*) XT2000_V3CFG_VADDR)
+#endif
+
+/* DATECD (date code) bit fields: */
+
+/* BCD-coded month (01..12): */
+#define XT2000_DATECD_MONTH_SHIFT 24
+#define XT2000_DATECD_MONTH_BITS 8
+#define XT2000_DATECD_MONTH_MASK 0xFF000000
+/* BCD-coded day (01..31): */
+#define XT2000_DATECD_DAY_SHIFT 16
+#define XT2000_DATECD_DAY_BITS 8
+#define XT2000_DATECD_DAY_MASK 0x00FF0000
+/* BCD-coded year (2001..9999): */
+#define XT2000_DATECD_YEAR_SHIFT 0
+#define XT2000_DATECD_YEAR_BITS 16
+#define XT2000_DATECD_YEAR_MASK 0x0000FFFF
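The SHIFT/MASK pairs above are meant to be used together; a small sketch, assuming the FPGA register block is mapped (XT2000_FPGAREGS_VADDR defined) so that XT2000_DATECD_REG exists:

/* Extract the BCD-coded fields from the FPGA date-code register. */
static void read_fpga_datecode(unsigned *month, unsigned *day, unsigned *year)
{
    unsigned datecd = XT2000_DATECD_REG;  /* 32-bit read of the read-only register */

    *month = (datecd & XT2000_DATECD_MONTH_MASK) >> XT2000_DATECD_MONTH_SHIFT; /* BCD 01..12 */
    *day   = (datecd & XT2000_DATECD_DAY_MASK)   >> XT2000_DATECD_DAY_SHIFT;   /* BCD 01..31 */
    *year  = (datecd & XT2000_DATECD_YEAR_MASK)  >> XT2000_DATECD_YEAR_SHIFT;  /* BCD 2001..9999 */
}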
+
+/* STSREG (status) bit fields: */
+
+/* Switch SW3 setting bit fields (0=off/up, 1=on/down): */
+#define XT2000_STSREG_SW3_SHIFT 0
+#define XT2000_STSREG_SW3_BITS 4
+#define XT2000_STSREG_SW3_MASK 0x0000000F
+/* Boot-select bits of switch SW3: */
+#define XT2000_STSREG_BOOTSEL_SHIFT 0
+#define XT2000_STSREG_BOOTSEL_BITS 2
+#define XT2000_STSREG_BOOTSEL_MASK 0x00000003
+/* Boot-select values: */
+#define XT2000_STSREG_BOOTSEL_FLASH 0
+#define XT2000_STSREG_BOOTSEL_EPROM16 1
+#define XT2000_STSREG_BOOTSEL_PROM8 2
+#define XT2000_STSREG_BOOTSEL_ASRAM 3
+/* User-defined bits of switch SW3: */
+#define XT2000_STSREG_SW3_2_SHIFT 2
+#define XT2000_STSREG_SW3_2_MASK 0x00000004
+#define XT2000_STSREG_SW3_3_SHIFT 3
+#define XT2000_STSREG_SW3_3_MASK 0x00000008
+
+/* SYSLED (system LED) bit fields: */
+
+/* LED control bit (0=off, 1=on): */
+#define XT2000_SYSLED_LEDON_SHIFT 0
+#define XT2000_SYSLED_LEDON_MASK 0x00000001
+
+/* WRPROT (write protect) bit fields (0=writable, 1=write-protected [default]): */
+
+/* Flash write protect: */
+#define XT2000_WRPROT_FLWP_SHIFT 0
+#define XT2000_WRPROT_FLWP_MASK 0x00000001
+/* Reserved but present write protect bits: */
+#define XT2000_WRPROT_WRP_SHIFT 1
+#define XT2000_WRPROT_WRP_BITS 7
+#define XT2000_WRPROT_WRP_MASK 0x000000FE
+
+/* SWRST (software reset; allows s/w to generate power-on equivalent reset): */
+
+/* Software reset bits: */
+#define XT2000_SWRST_SWR_SHIFT 0
+#define XT2000_SWRST_SWR_BITS 16
+#define XT2000_SWRST_SWR_MASK 0x0000FFFF
+/* Software reset value -- writing this value resets the board: */
+#define XT2000_SWRST_RESETVALUE 0x0000DEAD
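Since writing the magic value is the entire protocol, a board reset reduces to a single 32-bit store; a sketch, again assuming XT2000_FPGAREGS_VADDR (and thus XT2000_SWRST_REG) is defined:

/* Generate a power-on-equivalent reset of the XT2000 board. */
static void xt2000_board_reset(void)
{
    XT2000_SWRST_REG = XT2000_SWRST_RESETVALUE;  /* writing 0xDEAD resets the board */
    for (;;)
        ;                                        /* should never return */
}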
+
+/* SYSRST (system reset; controls reset of individual peripherals): */
+
+/* All-device reset: */
+#define XT2000_SYSRST_ALL_SHIFT 0
+#define XT2000_SYSRST_ALL_BITS 4
+#define XT2000_SYSRST_ALL_MASK 0x0000000F
+/* HDSP-2534 LED display reset (1=reset, 0=nothing): */
+#define XT2000_SYSRST_LED_SHIFT 0
+#define XT2000_SYSRST_LED_MASK 0x00000001
+/* Sonic DP83934 Ethernet controller reset (1=reset, 0=nothing): */
+#define XT2000_SYSRST_SONIC_SHIFT 1
+#define XT2000_SYSRST_SONIC_MASK 0x00000002
+/* DP16552 DUART reset (1=reset, 0=nothing): */
+#define XT2000_SYSRST_DUART_SHIFT 2
+#define XT2000_SYSRST_DUART_MASK 0x00000004
+/* V3 V320 PCI bridge controller reset (1=reset, 0=nothing): */
+#define XT2000_SYSRST_V3_SHIFT 3
+#define XT2000_SYSRST_V3_MASK 0x00000008
+
+/* IMASK (interrupt mask; 0=disable, 1=enable): */
+/* ISTAT (interrupt status; 0=inactive, 1=pending): */
+
+/* PCI INTP interrupt: */
+#define XT2000_INTMUX_PCI_INTP_SHIFT 2
+#define XT2000_INTMUX_PCI_INTP_MASK 0x00000004
+/* PCI INTS interrupt: */
+#define XT2000_INTMUX_PCI_INTS_SHIFT 3
+#define XT2000_INTMUX_PCI_INTS_MASK 0x00000008
+/* PCI INTD interrupt: */
+#define XT2000_INTMUX_PCI_INTD_SHIFT 4
+#define XT2000_INTMUX_PCI_INTD_MASK 0x00000010
+/* V320 PCI controller interrupt: */
+#define XT2000_INTMUX_V3_SHIFT 5
+#define XT2000_INTMUX_V3_MASK 0x00000020
+/* PCI ENUM interrupt: */
+#define XT2000_INTMUX_PCI_ENUM_SHIFT 6
+#define XT2000_INTMUX_PCI_ENUM_MASK 0x00000040
+/* PCI DEG interrupt: */
+#define XT2000_INTMUX_PCI_DEG_SHIFT 7
+#define XT2000_INTMUX_PCI_DEG_MASK 0x00000080
+
+/* V3CFG (V3 config, V320 PCI controller): */
+
+/* V3 address control (0=pass-thru, 1=V3 address bits 31:28 set to 4'b0001 [default]): */
+#define XT2000_V3CFG_V3ADC_SHIFT 0
+#define XT2000_V3CFG_V3ADC_MASK 0x00000001
+
+/* I2C Devices */
+
+#define XT2000_I2C_RTC_ID 0x68
+#define XT2000_I2C_NVRAM0_ID 0x56 /* 1st 256 byte block */
+#define XT2000_I2C_NVRAM1_ID 0x57 /* 2nd 256 byte block */
+
+/* NVRAM Board Info structure: */
+
+#define XT2000_NVRAM_SIZE 512
+
+#define XT2000_NVRAM_BINFO_START 0x100
+#define XT2000_NVRAM_BINFO_SIZE 0x20
+#define XT2000_NVRAM_BINFO_VERSION 0x10 /* version 1.0 */
+#if 0
+#define XT2000_NVRAM_BINFO_VERSION_OFFSET 0x00
+#define XT2000_NVRAM_BINFO_VERSION_SIZE 0x1
+#define XT2000_NVRAM_BINFO_ETH_ADDR_OFFSET 0x02
+#define XT2000_NVRAM_BINFO_ETH_ADDR_SIZE 0x6
+#define XT2000_NVRAM_BINFO_SN_OFFSET 0x10
+#define XT2000_NVRAM_BINFO_SN_SIZE 0xE
+#define XT2000_NVRAM_BINFO_CRC_OFFSET 0x1E
+#define XT2000_NVRAM_BINFO_CRC_SIZE 0x2
+#endif /*0*/
+
+#if !defined(__ASSEMBLY__) && !defined(_NOCLANGUAGE)
+typedef struct xt2000_nvram_binfo {
+ unsigned char version;
+ unsigned char reserved1;
+ unsigned char eth_addr[6];
+ unsigned char reserved8[8];
+ unsigned char serialno[14];
+ unsigned char crc[2]; /* 16-bit CRC */
+} xt2000_nvram_binfo;
+#endif /*!__ASSEMBLY__ && !_NOCLANGUAGE*/
+
+
+#endif /*_INC_XT2000_H_*/
+
diff --git a/include/asm-xtensa/xtensa/xtboard.h b/include/asm-xtensa/xtensa/xtboard.h
new file mode 100644
index 00000000000..22469c17530
--- /dev/null
+++ b/include/asm-xtensa/xtensa/xtboard.h
@@ -0,0 +1,120 @@
+#ifndef _xtboard_h_included_
+#define _xtboard_h_included_
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * xtboard.h -- Routines for getting useful information from the board.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+#include <xtensa/xt2000.h>
+
+#define XTBOARD_RTC_ERROR -1
+#define XTBOARD_RTC_STOPPED -2
+
+
+/* xt2000-i2cdev.c: */
+typedef void XtboardDelayFunc( unsigned );
+extern XtboardDelayFunc* xtboard_set_nsdelay_func( XtboardDelayFunc *delay_fn );
+extern int xtboard_i2c_read (unsigned id, unsigned char *buf, unsigned addr, unsigned size);
+extern int xtboard_i2c_write(unsigned id, unsigned char *buf, unsigned addr, unsigned size);
+extern int xtboard_i2c_wait_nvram_ack(unsigned id, unsigned swtimer);
+
+/* xtboard.c: */
+extern int xtboard_nvram_read (unsigned addr, unsigned len, unsigned char *buf);
+extern int xtboard_nvram_write(unsigned addr, unsigned len, unsigned char *buf);
+extern int xtboard_nvram_binfo_read (xt2000_nvram_binfo *buf);
+extern int xtboard_nvram_binfo_write(xt2000_nvram_binfo *buf);
+extern int xtboard_nvram_binfo_valid(xt2000_nvram_binfo *buf);
+extern int xtboard_ethermac_get(unsigned char *buf);
+extern int xtboard_ethermac_set(unsigned char *buf);
+
+/*+*----------------------------------------------------------------------------
+/ Function: xtboard_get_rtc_time
+/
+/ Description: Get time stored in real-time clock.
+/
+/ Returns: time in seconds stored in real-time clock.
+/-**----------------------------------------------------------------------------*/
+
+extern unsigned xtboard_get_rtc_time(void);
+
+/*+*----------------------------------------------------------------------------
+/ Function: xtboard_set_rtc_time
+/
+/ Description: Set time stored in real-time clock.
+/
+/ Parameters: time -- time in seconds to store to real-time clock
+/
+/ Returns: 0 on success, xtboard_i2c_write() error code otherwise.
+/-**----------------------------------------------------------------------------*/
+
+extern int xtboard_set_rtc_time(unsigned time);
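Combining the two RTC routines gives a simple adjustment helper. A sketch relying only on the return conventions documented above; treating the XTBOARD_RTC_* codes as possible return values of xtboard_get_rtc_time() is an assumption:

/* Advance the board's real-time clock by 'secs' seconds.
 * Returns 0 on success, non-zero otherwise. */
static int rtc_advance(unsigned secs)
{
    unsigned now = xtboard_get_rtc_time();          /* time in seconds */

    if (now == (unsigned)XTBOARD_RTC_ERROR || now == (unsigned)XTBOARD_RTC_STOPPED)
        return -1;                                  /* assumed error indications */
    return xtboard_set_rtc_time(now + secs);        /* 0 on success, else i2c error code */
}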
+
+
+/* xtfreq.c: */
+/*+*----------------------------------------------------------------------------
+/ Function: xtboard_measure_sys_clk
+/
+/ Description: Get frequency of system clock.
+/
+/ Parameters: none
+/
+/ Returns: frequency of system clock.
+/-**----------------------------------------------------------------------------*/
+
+extern unsigned xtboard_measure_sys_clk(void);
+
+
+#if 0 /* old stuff from xtboard.c: */
+
+/*+*----------------------------------------------------------------------------
+/ Function: xtboard_nvram_valid
+/
+/ Description: Determines if data in NVRAM is valid.
+/
+/ Parameters: delay -- 10us delay function
+/
+/ Returns: 1 if NVRAM is valid, 0 otherwise
+/-**----------------------------------------------------------------------------*/
+
+extern unsigned xtboard_nvram_valid(void (*delay)( void ));
+
+/*+*----------------------------------------------------------------------------
+/ Function: xtboard_get_nvram_contents
+/
+/ Description: Returns contents of NVRAM.
+/
+/ Parameters: buf -- buffer to store NVRAM contents.
+/ delay -- 10us delay function
+/
+/ Returns: 1 if NVRAM is valid, 0 otherwise
+/-**----------------------------------------------------------------------------*/
+
+extern unsigned xtboard_get_nvram_contents(unsigned char *buf, void (*delay)( void ));
+
+/*+*----------------------------------------------------------------------------
+/ Function: xtboard_get_ether_addr
+/
+/ Description: Returns ethernet address of board.
+/
+/ Parameters: buf -- buffer to store ethernet address
+/ delay -- 10us delay function
+/
+/ Returns: nothing.
+/-**----------------------------------------------------------------------------*/
+
+extern void xtboard_get_ether_addr(unsigned char *buf, void (*delay)( void ));
+
+#endif /*0*/
+
+
+#endif /*_xtboard_h_included_*/
+