(2006-08-06) rescue-bootcd

2006-08-06 00:00:00 +02:00
parent 2f796b816a
commit decb062d20
21091 changed files with 7076462 additions and 0 deletions


@@ -0,0 +1,35 @@
#ifndef _ASM_IA64_A_OUT_H
#define _ASM_IA64_A_OUT_H
/*
* No a.out format has been (or should be) defined so this file is
* just a dummy that allows us to get binfmt_elf compiled. It
* probably would be better to clean up binfmt_elf.c so it does not
* necessarily depend on there being a.out support.
*
* Modified 1998-2002
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
*/
#include <linux/types.h>
struct exec {
unsigned long a_info;
unsigned long a_text;
unsigned long a_data;
unsigned long a_bss;
unsigned long a_entry;
};
#define N_TXTADDR(x) 0
#define N_DATADDR(x) 0
#define N_BSSADDR(x) 0
#define N_DRSIZE(x) 0
#define N_TRSIZE(x) 0
#define N_SYMSIZE(x) 0
#define N_TXTOFF(x) 0
#ifdef __KERNEL__
#include <asm/ustack.h>
#endif
#endif /* _ASM_IA64_A_OUT_H */


@@ -0,0 +1,17 @@
/*
* ia64/platform/hp/common/hp_acpi.h
*
* Copyright (C) 2003 Hewlett-Packard
* Copyright (C) Alex Williamson
* Copyright (C) Bjorn Helgaas
*
* Vendor specific extensions to ACPI.
*/
#ifndef _ASM_IA64_ACPI_EXT_H
#define _ASM_IA64_ACPI_EXT_H
#include <linux/types.h>
extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length);
#endif /* _ASM_IA64_ACPI_EXT_H */


@@ -0,0 +1,112 @@
/*
* asm-ia64/acpi.h
*
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
* Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/numa.h>
#include <asm/system.h>
#define COMPILER_DEPENDENT_INT64 long
#define COMPILER_DEPENDENT_UINT64 unsigned long
/*
* Calling conventions:
*
* ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
* ACPI_EXTERNAL_XFACE - External ACPI interfaces
* ACPI_INTERNAL_XFACE - Internal ACPI interfaces
* ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
*/
#define ACPI_SYSTEM_XFACE
#define ACPI_EXTERNAL_XFACE
#define ACPI_INTERNAL_XFACE
#define ACPI_INTERNAL_VAR_XFACE
/* Asm macros */
#define ACPI_ASM_MACROS
#define BREAKPOINT3
#define ACPI_DISABLE_IRQS() local_irq_disable()
#define ACPI_ENABLE_IRQS() local_irq_enable()
#define ACPI_FLUSH_CPU_CACHE()
static inline int
ia64_acpi_acquire_global_lock (unsigned int *lock)
{
unsigned int old, new, val;
do {
old = *lock;
new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
val = ia64_cmpxchg4_acq(lock, new, old);
} while (unlikely (val != old));
return (new < 3) ? -1 : 0;
}
static inline int
ia64_acpi_release_global_lock (unsigned int *lock)
{
unsigned int old, new, val;
do {
old = *lock;
new = old & ~0x3;
val = ia64_cmpxchg4_acq(lock, new, old);
} while (unlikely (val != old));
return old & 0x1;
}
#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \
((Acq) = ia64_acpi_acquire_global_lock((unsigned int *) GLptr))
#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \
((Acq) = ia64_acpi_release_global_lock((unsigned int *) GLptr))
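/*
 * Note (illustrative, not part of the original header): the lock word uses the
 * ACPI-defined layout, bit 0 = pending, bit 1 = owned.  The acquire helper
 * above reports a non-zero Acq only when the lock was taken outright; the
 * release helper returns the pending bit so the caller knows a waiter must be
 * signalled.
 */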
#define acpi_disabled 0 /* ACPI always enabled on IA64 */
#define acpi_noirq 0 /* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
static inline void disable_acpi(void) { }
const char *acpi_get_sysname (void);
int acpi_request_vector (u32 int_type);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
#ifdef CONFIG_ACPI_NUMA
/* Proximity bitmap length; _PXM is at most 255 (8 bit)*/
#define MAX_PXM_DOMAINS (256)
extern int __initdata pxm_to_nid_map[MAX_PXM_DOMAINS];
extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#endif
extern u16 ia64_acpiid_to_sapicid[];
#endif /*__KERNEL__*/
#endif /*_ASM_ACPI_H*/
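A minimal, illustrative caller of the global-lock macros defined above is sketched here; the function name and the signalling step are assumptions for the example, not part of this header.

static void example_global_lock_section(unsigned int *facs_lock)
{
	int acquired, pending;

	ACPI_ACQUIRE_GLOBAL_LOCK(facs_lock, acquired);
	if (!acquired) {
		/* The owner already held the lock; our pending bit is now set,
		   so real code would sleep until the owner signals release. */
	}

	/* ... touch the hardware resource guarded by the global lock ... */

	ACPI_RELEASE_GLOBAL_LOCK(facs_lock, pending);
	if (pending) {
		/* A waiter set the pending bit while we held the lock; real
		   code would now write GBL_RLS to wake it. */
	}
}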


@@ -0,0 +1,21 @@
#ifndef _ASM_IA64_AGP_H
#define _ASM_IA64_AGP_H
/*
* IA-64 specific AGP definitions.
*
* Copyright (C) 2002-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
/*
* To avoid memory-attribute aliasing issues, we require that the AGPGART engine operate
* in coherent mode, which lets us map the AGP memory as normal (write-back) memory
* (unlike x86, where it gets mapped "write-coalescing").
*/
#define map_page_into_agp(page) /* nothing */
#define unmap_page_from_agp(page) /* nothing */
#define flush_agp_mappings() /* nothing */
#define flush_agp_cache() mb()
#endif /* _ASM_IA64_AGP_H */


@@ -0,0 +1,111 @@
#ifndef _ASM_IA64_ASMMACRO_H
#define _ASM_IA64_ASMMACRO_H
/*
* Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
#define ENTRY(name) \
.align 32; \
.proc name; \
name:
#define ENTRY_MIN_ALIGN(name) \
.align 16; \
.proc name; \
name:
#define GLOBAL_ENTRY(name) \
.global name; \
ENTRY(name)
#define END(name) \
.endp name
/*
* Helper macros to make unwind directives more readable:
*/
/* prologue_gr: */
#define ASM_UNW_PRLG_RP 0x8
#define ASM_UNW_PRLG_PFS 0x4
#define ASM_UNW_PRLG_PSP 0x2
#define ASM_UNW_PRLG_PR 0x1
#define ASM_UNW_PRLG_GRSAVE(ninputs) (32+(ninputs))
/*
* Helper macros for accessing user memory.
*/
.section "__ex_table", "a" // declare section & section attributes
.previous
# define EX(y,x...) \
.xdata4 "__ex_table", 99f-., y-.; \
[99:] x
# define EXCLR(y,x...) \
.xdata4 "__ex_table", 99f-., y-.+4; \
[99:] x
/*
* Mark instructions that need a load of a virtual address patched to be
* a load of a physical address. We use this either in critical performance
* path (ivt.S - TLB miss processing) or in places where it might not be
* safe to use a "tpa" instruction (mca_asm.S - error recovery).
*/
.section ".data.patch.vtop", "a" // declare section & section attributes
.previous
#define LOAD_PHYSICAL(pr, reg, obj) \
[1:](pr)movl reg = obj; \
.xdata4 ".data.patch.vtop", 1b-.
/*
* For now, we always put in the McKinley E9 workaround. On CPUs that don't need it,
* we'll patch out the work-around bundles with NOPs, so their impact is minimal.
*/
#define DO_MCKINLEY_E9_WORKAROUND
#ifdef DO_MCKINLEY_E9_WORKAROUND
.section ".data.patch.mckinley_e9", "a"
.previous
/* workaround for Itanium 2 Errata 9: */
# define FSYS_RETURN \
.xdata4 ".data.patch.mckinley_e9", 1f-.; \
1:{ .mib; \
nop.m 0; \
mov r16=ar.pfs; \
br.call.sptk.many b7=2f;; \
}; \
2:{ .mib; \
nop.m 0; \
mov ar.pfs=r16; \
br.ret.sptk.many b6;; \
}
#else
# define FSYS_RETURN br.ret.sptk.many b6
#endif
/*
* Up until early 2004, use of .align within a function caused bad unwind info.
* TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
* otherwise.
*/
#ifdef HAVE_WORKING_TEXT_ALIGN
# define TEXT_ALIGN(n) .align n
#else
# define TEXT_ALIGN(n)
#endif
#ifdef HAVE_SERIALIZE_DIRECTIVE
# define dv_serialize_data .serialize.data
# define dv_serialize_instruction .serialize.instruction
#else
# define dv_serialize_data
# define dv_serialize_instruction
#endif
#endif /* _ASM_IA64_ASMMACRO_H */


@@ -0,0 +1,183 @@
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*
* NOTE: don't mess with the types below! The "unsigned long" and
* "int" types were carefully placed so as to ensure proper operation
* of the macros.
*
* Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/types.h>
#include <asm/intrinsics.h>
/*
* On IA-64, counter must always be volatile to ensure that the
* memory accesses are ordered.
*/
typedef struct { volatile __s32 counter; } atomic_t;
typedef struct { volatile __s64 counter; } atomic64_t;
#define ATOMIC_INIT(i) ((atomic_t) { (i) })
#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
#define atomic_read(v) ((v)->counter)
#define atomic64_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
#define atomic64_set(v,i) (((v)->counter) = (i))
static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
__s32 old, new;
CMPXCHG_BUGCHECK_DECL
do {
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old + i;
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new;
}
static __inline__ int
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
__s64 old, new;
CMPXCHG_BUGCHECK_DECL
do {
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old + i;
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
return new;
}
static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
__s32 old, new;
CMPXCHG_BUGCHECK_DECL
do {
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old - i;
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new;
}
static __inline__ int
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
__s64 old, new;
CMPXCHG_BUGCHECK_DECL
do {
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old - i;
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
return new;
}
#define atomic_add_return(i,v) \
({ \
int __ia64_aar_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
|| (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
|| (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
|| (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
: ia64_atomic_add(__ia64_aar_i, v); \
})
#define atomic64_add_return(i,v) \
({ \
long __ia64_aar_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
|| (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
|| (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
|| (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
: ia64_atomic64_add(__ia64_aar_i, v); \
})
/*
* Atomically add I to V and return TRUE if the resulting value is
* negative.
*/
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
return atomic_add_return(i, v) < 0;
}
static __inline__ int
atomic64_add_negative (__s64 i, atomic64_t *v)
{
return atomic64_add_return(i, v) < 0;
}
#define atomic_sub_return(i,v) \
({ \
int __ia64_asr_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
|| (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
|| (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
|| (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
: ia64_atomic_sub(__ia64_asr_i, v); \
})
#define atomic64_sub_return(i,v) \
({ \
long __ia64_asr_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
|| (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
|| (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
|| (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
: ia64_atomic64_sub(__ia64_asr_i, v); \
})
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
#define atomic_add(i,v) atomic_add_return((i), (v))
#define atomic_sub(i,v) atomic_sub_return((i), (v))
#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))
#define atomic64_add(i,v) atomic64_add_return((i), (v))
#define atomic64_sub(i,v) atomic64_sub_return((i), (v))
#define atomic64_inc(v) atomic64_add(1, (v))
#define atomic64_dec(v) atomic64_sub(1, (v))
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#endif /* _ASM_IA64_ATOMIC_H */
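As the comment at the top of this file says, these primitives exist mainly for resource counting. Note from the macros above that atomic_add_return() and atomic_sub_return() emit a single ia64_fetch_and_add() only when the delta is a compile-time constant of +/-1, 4, 8 or 16; every other value falls back to the cmpxchg loop. A minimal, illustrative reference-count sketch (the structure and helper names are invented for the example):

struct example_object {
	atomic_t refcount;
};

static void example_get(struct example_object *obj)
{
	atomic_inc(&obj->refcount);		/* constant +1: single fetchadd */
}

static int example_put(struct example_object *obj)
{
	/* True only for the caller that drops the final reference. */
	return atomic_dec_and_test(&obj->refcount);
}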


@@ -0,0 +1,410 @@
#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H
/*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 O(1)
* scheduler patch
*/
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/bitops.h>
#include <asm/intrinsics.h>
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* This function is atomic and may not be reordered. See __set_bit()
* if you do not require the atomic guarantees.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*
* The address must be (at least) "long" aligned.
* Note that there are drivers (e.g., eepro100) which use these operations to operate on
* hw-defined data-structures, so we can't easily change these operations to force a
* bigger alignment.
*
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
static __inline__ void
set_bit (int nr, volatile void *addr)
{
__u32 bit, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
bit = 1 << (nr & 31);
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old | bit;
} while (cmpxchg_acq(m, old, new) != old);
}
/**
* __set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* Unlike set_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}
/*
* clear_bit() has "acquire" semantics.
*/
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() do { /* skip */; } while (0)
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
__u32 mask, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
mask = ~(1 << (nr & 31));
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old & mask;
} while (cmpxchg_acq(m, old, new) != old);
}
/**
* __clear_bit - Clears a bit in memory (non-atomic version)
*/
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
volatile __u32 *p = (__u32 *) addr + (nr >> 5);
__u32 m = 1 << (nr & 31);
*p &= ~m;
}
/**
* change_bit - Toggle a bit in memory
* @nr: Bit to toggle
* @addr: Address to start counting from
*
* change_bit() is atomic and may not be reordered.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static __inline__ void
change_bit (int nr, volatile void *addr)
{
__u32 bit, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
bit = (1 << (nr & 31));
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old ^ bit;
} while (cmpxchg_acq(m, old, new) != old);
}
/**
* __change_bit - Toggle a bit in memory
* @nr: the bit to toggle
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
__u32 bit, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
bit = 1 << (nr & 31);
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old | bit;
} while (cmpxchg_acq(m, old, new) != old);
return (old & bit) != 0;
}
/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
__u32 *p = (__u32 *) addr + (nr >> 5);
__u32 m = 1 << (nr & 31);
int oldbitset = (*p & m) != 0;
*p |= m;
return oldbitset;
}
/**
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
__u32 mask, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
mask = ~(1 << (nr & 31));
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old & mask;
} while (cmpxchg_acq(m, old, new) != old);
return (old & ~mask) != 0;
}
/**
* __test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static __inline__ int
__test_and_clear_bit(int nr, volatile void * addr)
{
__u32 *p = (__u32 *) addr + (nr >> 5);
__u32 m = 1 << (nr & 31);
int oldbitset = *p & m;
*p &= ~m;
return oldbitset;
}
/**
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to toggle
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
__u32 bit, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
bit = (1 << (nr & 31));
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old ^ bit;
} while (cmpxchg_acq(m, old, new) != old);
return (old & bit) != 0;
}
/*
* WARNING: non atomic version.
*/
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
__u32 old, bit = (1 << (nr & 31));
__u32 *m = (__u32 *) addr + (nr >> 5);
old = *m;
*m = old ^ bit;
return (old & bit) != 0;
}
static __inline__ int
test_bit (int nr, const volatile void *addr)
{
return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}
/**
* ffz - find the first zero bit in a long word
* @x: The long word to find the bit in
*
* Returns the bit-number (0..63) of the first (least significant) zero bit. Undefined if
* no zero exists, so code should check against ~0UL first...
*/
static inline unsigned long
ffz (unsigned long x)
{
unsigned long result;
result = ia64_popcnt(x & (~x - 1));
return result;
}
/**
* __ffs - find first bit in word.
* @x: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static __inline__ unsigned long
__ffs (unsigned long x)
{
unsigned long result;
result = ia64_popcnt((x-1) & ~x);
return result;
}
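/*
 * Worked example (illustrative): for x = 0x7 (binary ...0111),
 * x & (~x - 1) == 0x7 and ia64_popcnt(0x7) == 3, so ffz(0x7) == 3 -- the
 * lowest zero is bit 3.  Likewise, for x = 0x8, (x - 1) & ~x == 0x7, so
 * __ffs(0x8) == 3.
 */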
#ifdef __KERNEL__
/*
* ia64_fls - find the last (most-significant) set bit in a 64-bit quantity
* @x: The value to search
*/
static inline unsigned long
ia64_fls (unsigned long x)
{
long double d = x;
long exp;
exp = ia64_getf_exp(d);
return exp - 0xffff;
}
static inline int
fls (int x)
{
return ia64_fls((unsigned int) x);
}
/*
* ffs: find first bit set. This is defined the same way as the libc and compiler builtin
* ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on
* "int" values only and the result value is the bit number + 1. ffs(0) is defined to
* return zero.
*/
#define ffs(x) __builtin_ffs(x)
/*
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of an N-bit word
*/
static __inline__ unsigned long
hweight64 (unsigned long x)
{
unsigned long result;
result = ia64_popcnt(x);
return result;
}
#define hweight32(x) hweight64 ((x) & 0xfffffffful)
#define hweight16(x) hweight64 ((x) & 0xfffful)
#define hweight8(x) hweight64 ((x) & 0xfful)
#endif /* __KERNEL__ */
extern int __find_next_zero_bit (void *addr, unsigned long size,
unsigned long offset);
extern int __find_next_bit(const void *addr, unsigned long size,
unsigned long offset);
#define find_next_zero_bit(addr, size, offset) \
__find_next_zero_bit((addr), (size), (offset))
#define find_next_bit(addr, size, offset) \
__find_next_bit((addr), (size), (offset))
/*
* The optimizer actually does good code for this case..
*/
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
#ifdef __KERNEL__
#define __clear_bit(nr, addr) clear_bit(nr, addr)
#define ext2_set_bit test_and_set_bit
#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
#define ext2_clear_bit test_and_clear_bit
#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
#define ext2_test_bit test_bit
#define ext2_find_first_zero_bit find_first_zero_bit
#define ext2_find_next_zero_bit find_next_zero_bit
/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
static inline int
sched_find_first_bit (unsigned long *b)
{
if (unlikely(b[0]))
return __ffs(b[0]);
if (unlikely(b[1]))
return 64 + __ffs(b[1]);
return __ffs(b[2]) + 128;
}
#endif /* __KERNEL__ */
#endif /* _ASM_IA64_BITOPS_H */
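All of the atomic bit operations above reduce to the same cmpxchg-acquire loop on an aligned 32-bit word. A minimal, illustrative use of them as a pending-work bitmap (every name here is invented for the example):

static unsigned long example_pending[2];	/* 128 flags, "long"-aligned */

static void example_mark(int nr)
{
	set_bit(nr, example_pending);		/* atomic set with acquire semantics */
}

static int example_take(int nr)
{
	/* Non-zero only for the caller that actually cleared the bit. */
	return test_and_clear_bit(nr, example_pending);
}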


@@ -0,0 +1,21 @@
#ifndef _ASM_IA64_BREAK_H
#define _ASM_IA64_BREAK_H
/*
* IA-64 Linux break numbers.
*
* Copyright (C) 1999 Hewlett-Packard Co
* Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
*/
/*
* OS-specific debug break numbers:
*/
#define __IA64_BREAK_KDB 0x80100
/*
* OS-specific break numbers:
*/
#define __IA64_BREAK_SYSCALL 0x100000
#endif /* _ASM_IA64_BREAK_H */


@@ -0,0 +1,15 @@
#ifndef _ASM_IA64_BUG_H
#define _ASM_IA64_BUG_H
#if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
# define ia64_abort() __builtin_trap()
#else
# define ia64_abort() (*(volatile int *) 0 = 0)
#endif
#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)
/* should this BUG be made generic? */
#define HAVE_ARCH_BUG
#include <asm-generic/bug.h>
#endif


@@ -0,0 +1,19 @@
/*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Needs:
* void check_bugs(void);
*
* Based on <asm-alpha/bugs.h>.
*
* Modified 1998, 1999, 2003
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
*/
#ifndef _ASM_IA64_BUGS_H
#define _ASM_IA64_BUGS_H
#include <asm/processor.h>
extern void check_bugs (void);
#endif /* _ASM_IA64_BUGS_H */


@@ -0,0 +1,42 @@
#ifndef _ASM_IA64_BYTEORDER_H
#define _ASM_IA64_BYTEORDER_H
/*
* Modified 1998, 1999
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
*/
#include <asm/types.h>
#include <asm/intrinsics.h>
#include <linux/compiler.h>
static __inline__ __attribute_const__ __u64
__ia64_swab64 (__u64 x)
{
__u64 result;
result = ia64_mux1(x, ia64_mux1_rev);
return result;
}
static __inline__ __attribute_const__ __u32
__ia64_swab32 (__u32 x)
{
return __ia64_swab64(x) >> 32;
}
static __inline__ __attribute_const__ __u16
__ia64_swab16(__u16 x)
{
return __ia64_swab64(x) >> 48;
}
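/*
 * Illustrative values: __ia64_swab64() reverses all eight bytes with
 * mux1 @rev, so __ia64_swab32(0x12345678) == 0x78563412 and
 * __ia64_swab16(0x1234) == 0x3412.
 */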
#define __arch__swab64(x) __ia64_swab64(x)
#define __arch__swab32(x) __ia64_swab32(x)
#define __arch__swab16(x) __ia64_swab16(x)
#define __BYTEORDER_HAS_U64__
#include <linux/byteorder/little_endian.h>
#endif /* _ASM_IA64_BYTEORDER_H */


@@ -0,0 +1,30 @@
#ifndef _ASM_IA64_CACHE_H
#define _ASM_IA64_CACHE_H
#include <linux/config.h>
/*
* Copyright (C) 1998-2000 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
/* Bytes per L1 (data) cache line. */
#define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
#ifdef CONFIG_SMP
# define SMP_CACHE_SHIFT L1_CACHE_SHIFT
# define SMP_CACHE_BYTES L1_CACHE_BYTES
#else
/*
* The "aligned" directive can only _increase_ alignment, so this is
* safe and provides an easy way to avoid wasting space on a
* uni-processor:
*/
# define SMP_CACHE_SHIFT 3
# define SMP_CACHE_BYTES (1 << 3)
#endif
#endif /* _ASM_IA64_CACHE_H */


@@ -0,0 +1,50 @@
#ifndef _ASM_IA64_CACHEFLUSH_H
#define _ASM_IA64_CACHEFLUSH_H
/*
* Copyright (C) 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/page-flags.h>
#include <asm/bitops.h>
#include <asm/page.h>
/*
* Cache flushing routines. This is the kind of stuff that can be very expensive, so try
* to avoid them whenever possible.
*/
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_icache_page(vma,page) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define flush_dcache_page(page) \
do { \
clear_bit(PG_arch_1, &(page)->flags); \
} while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
extern void flush_icache_range (unsigned long start, unsigned long end);
#define flush_icache_user_range(vma, page, user_addr, len) \
do { \
unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK); \
flush_icache_range(_addr, _addr + (len)); \
} while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { memcpy(dst, src, len); \
flush_icache_user_range(vma, page, vaddr, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
#endif /* _ASM_IA64_CACHEFLUSH_H */
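Everything above is a no-op except keeping the instruction cache coherent with freshly written code, which is what flush_icache_range() does. A minimal, illustrative caller (the function name is invented; memcpy() is assumed from <linux/string.h>):

static void example_install_code(void *dst, const void *src, unsigned long len)
{
	memcpy(dst, src, len);
	flush_icache_range((unsigned long) dst, (unsigned long) dst + len);
}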


@@ -0,0 +1,76 @@
#ifndef _ASM_IA64_CHECKSUM_H
#define _ASM_IA64_CHECKSUM_H
/*
* Modified 1998, 1999
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
extern unsigned short ip_fast_csum (unsigned char * iph, unsigned int ihl);
/*
* Computes the checksum of the TCP/UDP pseudo-header and returns a 16-bit
* checksum, already complemented
*/
extern unsigned short int csum_tcpudp_magic (unsigned long saddr,
unsigned long daddr,
unsigned short len,
unsigned short proto,
unsigned int sum);
extern unsigned int csum_tcpudp_nofold (unsigned long saddr,
unsigned long daddr,
unsigned short len,
unsigned short proto,
unsigned int sum);
/*
* Computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
extern unsigned int csum_partial (const unsigned char * buff, int len,
unsigned int sum);
/*
* Same as csum_partial, but copies from src while it checksums.
*
* Here it is even more important to align src and dst on a 32-bit (or
* even better 64-bit) boundary.
*/
extern unsigned int csum_partial_copy_from_user (const char *src, char *dst,
int len, unsigned int sum,
int *errp);
extern unsigned int csum_partial_copy_nocheck (const char *src, char *dst,
int len, unsigned int sum);
/*
* This routine is used for miscellaneous IP-like checksums, mainly in
* icmp.c
*/
extern unsigned short ip_compute_csum (unsigned char *buff, int len);
/*
* Fold a partial checksum without adding pseudo headers.
*/
static inline unsigned short
csum_fold (unsigned int sum)
{
sum = (sum & 0xffff) + (sum >> 16);
sum = (sum & 0xffff) + (sum >> 16);
return ~sum;
}
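/*
 * Worked example (illustrative): csum_fold(0x00012345) folds to
 * 0x2345 + 0x1 = 0x2346, the second fold adds no carry, and the ones'
 * complement 0xdcb9 is returned as the 16-bit checksum.
 */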
#endif /* _ASM_IA64_CHECKSUM_H */


@@ -0,0 +1,198 @@
#ifndef _ASM_IA64_COMPAT_H
#define _ASM_IA64_COMPAT_H
/*
* Architecture specific compatibility types
*/
#include <linux/types.h>
#define COMPAT_USER_HZ 100
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_time_t;
typedef s32 compat_clock_t;
typedef s32 compat_key_t;
typedef s32 compat_pid_t;
typedef u16 compat_uid_t;
typedef u16 compat_gid_t;
typedef u32 compat_uid32_t;
typedef u32 compat_gid32_t;
typedef u16 compat_mode_t;
typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
typedef s32 compat_off_t;
typedef s64 compat_loff_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
};
struct compat_timeval {
compat_time_t tv_sec;
s32 tv_usec;
};
struct compat_stat {
compat_dev_t st_dev;
u16 __pad1;
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_nlink_t st_nlink;
compat_uid_t st_uid;
compat_gid_t st_gid;
compat_dev_t st_rdev;
u16 __pad2;
u32 st_size;
u32 st_blksize;
u32 st_blocks;
u32 st_atime;
u32 st_atime_nsec;
u32 st_mtime;
u32 st_mtime_nsec;
u32 st_ctime;
u32 st_ctime_nsec;
u32 __unused4;
u32 __unused5;
};
struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
compat_pid_t l_pid;
};
#define F_GETLK64 12
#define F_SETLK64 13
#define F_SETLKW64 14
/*
* IA32 uses 4 byte alignment for 64 bit quantities,
* so we need to pack this structure.
*/
struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
} __attribute__((packed));
struct compat_statfs {
int f_type;
int f_bsize;
int f_blocks;
int f_bfree;
int f_bavail;
int f_files;
int f_ffree;
compat_fsid_t f_fsid;
int f_namelen; /* SunOS ignores this field. */
int f_frsize;
int f_spare[5];
};
#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
#define COMPAT_RLIM_INFINITY 0xffffffff
typedef u32 compat_old_sigset_t; /* at least 32 bits */
#define _COMPAT_NSIG 64
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
struct compat_ipc64_perm {
compat_key_t key;
compat_uid32_t uid;
compat_gid32_t gid;
compat_uid32_t cuid;
compat_gid32_t cgid;
unsigned short mode;
unsigned short __pad1;
unsigned short seq;
unsigned short __pad2;
compat_ulong_t unused1;
compat_ulong_t unused2;
};
struct compat_semid64_ds {
struct compat_ipc64_perm sem_perm;
compat_time_t sem_otime;
compat_ulong_t __unused1;
compat_time_t sem_ctime;
compat_ulong_t __unused2;
compat_ulong_t sem_nsems;
compat_ulong_t __unused3;
compat_ulong_t __unused4;
};
struct compat_msqid64_ds {
struct compat_ipc64_perm msg_perm;
compat_time_t msg_stime;
compat_ulong_t __unused1;
compat_time_t msg_rtime;
compat_ulong_t __unused2;
compat_time_t msg_ctime;
compat_ulong_t __unused3;
compat_ulong_t msg_cbytes;
compat_ulong_t msg_qnum;
compat_ulong_t msg_qbytes;
compat_pid_t msg_lspid;
compat_pid_t msg_lrpid;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};
struct compat_shmid64_ds {
struct compat_ipc64_perm shm_perm;
compat_size_t shm_segsz;
compat_time_t shm_atime;
compat_ulong_t __unused1;
compat_time_t shm_dtime;
compat_ulong_t __unused2;
compat_time_t shm_ctime;
compat_ulong_t __unused3;
compat_pid_t shm_cpid;
compat_pid_t shm_lpid;
compat_ulong_t shm_nattch;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};
/*
* A pointer passed in from user mode. This should not be used for syscall parameters,
* just declare them as pointers because the syscall entry code will have appropriately
* converted them already.
*/
typedef u32 compat_uptr_t;
static inline void __user *
compat_ptr (compat_uptr_t uptr)
{
return (void __user *) (unsigned long) uptr;
}
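/*
 * Illustrative use (not part of this header): a compat syscall that is
 * handed a 32-bit user pointer inside a structure widens it first, e.g.
 *
 *	void __user *p = compat_ptr(req32->buf_ptr);
 *	if (copy_from_user(&kbuf, p, sizeof(kbuf)))
 *		return -EFAULT;
 *
 * where req32->buf_ptr is a compat_uptr_t field; req32 and kbuf are
 * invented names.
 */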
static __inline__ void __user *
compat_alloc_user_space (long len)
{
struct pt_regs *regs = ia64_task_regs(current);
return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
}
#endif /* _ASM_IA64_COMPAT_H */


@@ -0,0 +1,17 @@
#ifndef _ASM_IA64_CPU_H_
#define _ASM_IA64_CPU_H_
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/percpu.h>
struct ia64_cpu {
struct cpu cpu;
};
DECLARE_PER_CPU(struct ia64_cpu, cpu_devices);
DECLARE_PER_CPU(int, cpu_state);
#endif /* _ASM_IA64_CPU_H_ */


@@ -0,0 +1,17 @@
#ifndef _ASM_IA64_CURRENT_H
#define _ASM_IA64_CURRENT_H
/*
* Modified 1998-2000
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
#include <asm/intrinsics.h>
/*
* In kernel mode, thread pointer (r13) is used to point to the current task
* structure.
*/
#define current ((struct task_struct *) ia64_getreg(_IA64_REG_TP))
#endif /* _ASM_IA64_CURRENT_H */


@@ -0,0 +1,15 @@
#ifndef ASM_IA64_CYCLONE_H
#define ASM_IA64_CYCLONE_H
#ifdef CONFIG_IA64_CYCLONE
extern int use_cyclone;
extern void __init cyclone_setup(void);
#else /* CONFIG_IA64_CYCLONE */
#define use_cyclone 0
static inline void cyclone_setup(void)
{
printk(KERN_ERR "Cyclone Counter: System not configured"
" w/ CONFIG_IA64_CYCLONE.\n");
}
#endif /* CONFIG_IA64_CYCLONE */
#endif /* !ASM_IA64_CYCLONE_H */


@@ -0,0 +1,97 @@
#ifndef _ASM_IA64_DELAY_H
#define _ASM_IA64_DELAY_H
/*
* Delay routines using a pre-computed "cycles/usec" value.
*
* Copyright (C) 1998, 1999 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
* Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/compiler.h>
#include <asm/intrinsics.h>
#include <asm/processor.h>
static __inline__ void
ia64_set_itm (unsigned long val)
{
ia64_setreg(_IA64_REG_CR_ITM, val);
ia64_srlz_d();
}
static __inline__ unsigned long
ia64_get_itm (void)
{
unsigned long result;
result = ia64_getreg(_IA64_REG_CR_ITM);
ia64_srlz_d();
return result;
}
static __inline__ void
ia64_set_itv (unsigned long val)
{
ia64_setreg(_IA64_REG_CR_ITV, val);
ia64_srlz_d();
}
static __inline__ unsigned long
ia64_get_itv (void)
{
return ia64_getreg(_IA64_REG_CR_ITV);
}
static __inline__ void
ia64_set_itc (unsigned long val)
{
ia64_setreg(_IA64_REG_AR_ITC, val);
ia64_srlz_d();
}
static __inline__ unsigned long
ia64_get_itc (void)
{
unsigned long result;
result = ia64_getreg(_IA64_REG_AR_ITC);
ia64_barrier();
#ifdef CONFIG_ITANIUM
while (unlikely((__s32) result == -1)) {
result = ia64_getreg(_IA64_REG_AR_ITC);
ia64_barrier();
}
#endif
return result;
}
extern void ia64_delay_loop (unsigned long loops);
static __inline__ void
__delay (unsigned long loops)
{
if (unlikely(loops < 1))
return;
ia64_delay_loop (loops - 1);
}
static __inline__ void
udelay (unsigned long usecs)
{
unsigned long start = ia64_get_itc();
unsigned long cycles = usecs*local_cpu_data->cyc_per_usec;
while (ia64_get_itc() - start < cycles)
cpu_relax();
}
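/*
 * Illustrative arithmetic: with the ITC running at 1 GHz, cyc_per_usec is
 * roughly 1000, so udelay(10) busy-waits until about 10,000 ITC cycles
 * have elapsed.
 */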
#endif /* _ASM_IA64_DELAY_H */


@@ -0,0 +1 @@
#include <asm-generic/div64.h>


@@ -0,0 +1,70 @@
#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H
/*
* Copyright (C) 2003-2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
#include <asm/machvec.h>
#define dma_alloc_coherent platform_dma_alloc_coherent
#define dma_alloc_noncoherent platform_dma_alloc_coherent /* coherent mem. is cheap */
#define dma_free_coherent platform_dma_free_coherent
#define dma_free_noncoherent platform_dma_free_coherent
#define dma_map_single platform_dma_map_single
#define dma_map_sg platform_dma_map_sg
#define dma_unmap_single platform_dma_unmap_single
#define dma_unmap_sg platform_dma_unmap_sg
#define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu
#define dma_sync_sg_for_cpu platform_dma_sync_sg_for_cpu
#define dma_sync_single_for_device platform_dma_sync_single_for_device
#define dma_sync_sg_for_device platform_dma_sync_sg_for_device
#define dma_mapping_error platform_dma_mapping_error
#define dma_map_page(dev, pg, off, size, dir) \
dma_map_single(dev, page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir) \
dma_unmap_single(dev, dma_addr, size, dir)
/*
* Rest of this file is part of the "Advanced DMA API". Use at your own risk.
* See Documentation/DMA-API.txt for details.
*/
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \
dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
dma_sync_single_for_device(dev, dma_handle, size, dir)
#define dma_supported platform_dma_supported
static inline int
dma_set_mask (struct device *dev, u64 mask)
{
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
return 0;
}
static inline int
dma_get_cache_alignment (void)
{
extern int ia64_max_cacheline_size;
return ia64_max_cacheline_size;
}
static inline void
dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
{
/*
* IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to
* ensure that dma_cache_sync() enforces order, hence the mb().
*/
mb();
}
#define dma_is_consistent(dma_handle) (1) /* all we do is coherent memory... */
#endif /* _ASM_IA64_DMA_MAPPING_H */
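Since every operation above is only a macro onto the corresponding machine-vector hook, drivers use this file exactly like the generic DMA API. A minimal, illustrative streaming mapping (device, buffer and function names are assumptions for the example):

static int example_map_for_device(struct device *dev, void *buf, size_t size,
				  dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
	if (dma_mapping_error(*handle))
		return -EIO;
	/* ... the device may now DMA from buf via *handle ... */
	dma_unmap_single(dev, *handle, size, DMA_TO_DEVICE);
	return 0;
}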


@@ -0,0 +1,23 @@
#ifndef _ASM_IA64_DMA_H
#define _ASM_IA64_DMA_H
/*
* Copyright (C) 1998-2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
#include <asm/io.h> /* need byte IO */
extern unsigned long MAX_DMA_ADDRESS;
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
# define isa_dma_bridge_buggy (0)
#endif
#define free_dma(x)
#endif /* _ASM_IA64_DMA_H */


@@ -0,0 +1,259 @@
#ifndef _ASM_IA64_ELF_H
#define _ASM_IA64_ELF_H
/*
* ELF-specific definitions.
*
* Copyright (C) 1998-1999, 2002-2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
#include <asm/fpu.h>
#include <asm/page.h>
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x)->e_machine == EM_IA_64)
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS64
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_IA_64
#define USE_ELF_CORE_DUMP
/* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are
interpreted as follows by Linux: */
#define EF_IA_64_LINUX_EXECUTABLE_STACK 0x1 /* is stack (& heap) executable by default? */
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
* This is the location that an ET_DYN program is loaded if exec'ed.
* Typical use of this is to invoke "./ld.so someprog" to test out a
* new version of the loader. We need to make sure that it is out of
* the way of the program that it will "exec", and that there is
* sufficient room for the brk.
*/
#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
#define PT_IA_64_UNWIND 0x70000001
/* IA-64 relocations: */
#define R_IA64_NONE 0x00 /* none */
#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */
#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */
#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */
#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */
#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */
#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */
#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */
#define R_IA64_GPREL22 0x2a /* @gprel(sym+add), add imm22 */
#define R_IA64_GPREL64I 0x2b /* @gprel(sym+add), mov imm64 */
#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym+add), data4 MSB */
#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym+add), data4 LSB */
#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym+add), data8 MSB */
#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym+add), data8 LSB */
#define R_IA64_LTOFF22 0x32 /* @ltoff(sym+add), add imm22 */
#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym+add), mov imm64 */
#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym+add), add imm22 */
#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym+add), mov imm64 */
#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym+add), data8 MSB */
#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym+add), data8 LSB */
#define R_IA64_FPTR64I 0x43 /* @fptr(sym+add), mov imm64 */
#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym+add), data4 MSB */
#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym+add), data4 LSB */
#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym+add), data8 MSB */
#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym+add), data8 LSB */
#define R_IA64_PCREL60B 0x48 /* @pcrel(sym+add), brl */
#define R_IA64_PCREL21B 0x49 /* @pcrel(sym+add), ptb, call */
#define R_IA64_PCREL21M 0x4a /* @pcrel(sym+add), chk.s */
#define R_IA64_PCREL21F 0x4b /* @pcrel(sym+add), fchkf */
#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym+add), data4 MSB */
#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym+add), data4 LSB */
#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym+add), data8 MSB */
#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym+add), data8 LSB */
#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */
#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */
#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), 4 MSB */
#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), 4 LSB */
#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), 8 MSB */
#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), 8 LSB */
#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym+add), data4 MSB */
#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym+add), data4 LSB */
#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym+add), data8 MSB */
#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym+add), data8 LSB */
#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym+add), data4 MSB */
#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym+add), data4 LSB */
#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym+add), data8 MSB */
#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym+add), data8 LSB */
#define R_IA64_REL32MSB 0x6c /* data 4 + REL */
#define R_IA64_REL32LSB 0x6d /* data 4 + REL */
#define R_IA64_REL64MSB 0x6e /* data 8 + REL */
#define R_IA64_REL64LSB 0x6f /* data 8 + REL */
#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */
#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */
#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */
#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */
#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym+add), ptb, call */
#define R_IA64_PCREL22 0x7a /* @pcrel(sym+add), imm22 */
#define R_IA64_PCREL64I 0x7b /* @pcrel(sym+add), imm64 */
#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */
#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */
#define R_IA64_COPY 0x84 /* dynamic reloc, data copy */
#define R_IA64_SUB 0x85 /* -symbol + addend, add imm22 */
#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */
#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */
#define R_IA64_TPREL14 0x91 /* @tprel(sym+add), add imm14 */
#define R_IA64_TPREL22 0x92 /* @tprel(sym+add), add imm22 */
#define R_IA64_TPREL64I 0x93 /* @tprel(sym+add), add imm64 */
#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym+add), data8 MSB */
#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym+add), data8 LSB */
#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), add imm22 */
#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym+add), data8 MSB */
#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym+add), data8 LSB */
#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(s+a)), imm22 */
#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym+add), imm14 */
#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym+add), imm22 */
#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym+add), imm64 */
#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym+add), data4 MSB */
#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym+add), data4 LSB */
#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym+add), data8 MSB */
#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym+add), data8 LSB */
#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */
/* IA-64 specific section flags: */
#define SHF_IA_64_SHORT 0x10000000 /* section near gp */
/*
* We use (abuse?) this macro to insert the (empty) vm_area that is
* used to map the register backing store. I don't see any better
* place to do this, but we should discuss this with Linus once we can
* talk to him...
*/
extern void ia64_init_addr_space (void);
#define ELF_PLAT_INIT(_r, load_addr) ia64_init_addr_space()
/* ELF register definitions. This is needed for core dump support. */
/*
* elf_gregset_t contains the application-level state in the following order:
* r0-r31
* NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
* predicate registers (p0-p63)
* b0-b7
* ip cfm psr
* ar.rsc ar.bsp ar.bspstore ar.rnat
* ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
*/
#define ELF_NGREG 128 /* we really need just 72 but let's leave some headroom... */
#define ELF_NFPREG 128 /* f0 and f1 could be omitted, but so what... */
typedef unsigned long elf_fpxregset_t;
typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct ia64_fpreg elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
struct pt_regs; /* forward declaration... */
extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
#define ELF_CORE_COPY_REGS(_dest,_regs) ia64_elf_core_copy_regs(_regs, _dest);
/* This macro yields a bitmask that programs can use to figure out
what instruction set this CPU supports. */
#define ELF_HWCAP 0
/* This macro yields a string that ld.so will use to load
implementation specific libraries for optimization. Not terribly
relevant until we have real hardware to play with... */
#define ELF_PLATFORM NULL
/*
* Architecture-neutral AT_ values are in the range 0-17. Leave some room for more of
* them, start the architecture-specific ones at 32.
*/
#define AT_SYSINFO 32
#define AT_SYSINFO_EHDR 33
#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX)
#define elf_read_implies_exec(ex, have_pt_gnu_stack) \
(!(have_pt_gnu_stack) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
struct task_struct;
extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
#define ELF_CORE_COPY_TASK_REGS(tsk, elf_gregs) dump_task_regs(tsk, elf_gregs)
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
#define GATE_EHDR ((const struct elfhdr *) GATE_ADDR)
#define ARCH_DLINFO \
do { \
extern char __kernel_syscall_via_epc[]; \
NEW_AUX_ENT(AT_SYSINFO, (unsigned long) __kernel_syscall_via_epc); \
NEW_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long) GATE_EHDR); \
} while (0)
/*
* These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out
* extra segments containing the gate DSO contents. Dumping its
* contents makes post-mortem fully interpretable later without matching up
* the same kernel and hardware config to see what PC values meant.
* Dumping its extra ELF program headers includes all the other information
* a debugger needs to easily find how the gate DSO was being used.
*/
#define ELF_CORE_EXTRA_PHDRS (GATE_EHDR->e_phnum)
#define ELF_CORE_WRITE_EXTRA_PHDRS \
do { \
const struct elf_phdr *const gate_phdrs = \
(const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); \
int i; \
Elf64_Off ofs = 0; \
for (i = 0; i < GATE_EHDR->e_phnum; ++i) { \
struct elf_phdr phdr = gate_phdrs[i]; \
if (phdr.p_type == PT_LOAD) { \
phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); \
phdr.p_filesz = phdr.p_memsz; \
if (ofs == 0) { \
ofs = phdr.p_offset = offset; \
offset += phdr.p_filesz; \
} \
else \
phdr.p_offset = ofs; \
} \
else \
phdr.p_offset += ofs; \
phdr.p_paddr = 0; /* match other core phdrs */ \
DUMP_WRITE(&phdr, sizeof(phdr)); \
} \
} while (0)
#define ELF_CORE_WRITE_EXTRA_DATA \
do { \
const struct elf_phdr *const gate_phdrs = \
(const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); \
int i; \
for (i = 0; i < GATE_EHDR->e_phnum; ++i) { \
if (gate_phdrs[i].p_type == PT_LOAD) { \
DUMP_WRITE((void *) gate_phdrs[i].p_vaddr, \
PAGE_ALIGN(gate_phdrs[i].p_memsz)); \
break; \
} \
} \
} while (0)
#endif /* __KERNEL__ */
#endif /* _ASM_IA64_ELF_H */


@@ -0,0 +1 @@
#include <asm-generic/errno.h>


@@ -0,0 +1,84 @@
#ifndef _ASM_IA64_FCNTL_H
#define _ASM_IA64_FCNTL_H
/*
* Based on <asm-i386/fcntl.h>.
*
* Modified 1998-2000
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
*/
/*
* open/fcntl - O_SYNC is only implemented on block devices and on
* files located on an ext2 file system
*/
#define O_ACCMODE 0003
#define O_RDONLY 00
#define O_WRONLY 01
#define O_RDWR 02
#define O_CREAT 0100 /* not fcntl */
#define O_EXCL 0200 /* not fcntl */
#define O_NOCTTY 0400 /* not fcntl */
#define O_TRUNC 01000 /* not fcntl */
#define O_APPEND 02000
#define O_NONBLOCK 04000
#define O_NDELAY O_NONBLOCK
#define O_SYNC 010000
#define FASYNC 020000 /* fcntl, for BSD compatibility */
#define O_DIRECT 040000 /* direct disk access hint - currently ignored */
#define O_LARGEFILE 0100000
#define O_DIRECTORY 0200000 /* must be a directory */
#define O_NOFOLLOW 0400000 /* don't follow links */
#define O_NOATIME 01000000
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
#define F_SETFD 2 /* set/clear close_on_exec */
#define F_GETFL 3 /* get file->f_flags */
#define F_SETFL 4 /* set file->f_flags */
#define F_GETLK 5
#define F_SETLK 6
#define F_SETLKW 7
#define F_SETOWN 8 /* for sockets. */
#define F_GETOWN 9 /* for sockets. */
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
/* for posix fcntl() and lockf() */
#define F_RDLCK 0
#define F_WRLCK 1
#define F_UNLCK 2
/* for old implementation of bsd flock () */
#define F_EXLCK 4 /* or 3 */
#define F_SHLCK 8 /* or 4 */
/* for leases */
#define F_INPROGRESS 16
/* operations for bsd flock(), also used by the kernel implementation */
#define LOCK_SH 1 /* shared lock */
#define LOCK_EX 2 /* exclusive lock */
#define LOCK_NB 4 /* or'd with one of the above to prevent
blocking */
#define LOCK_UN 8 /* remove lock */
#define LOCK_MAND 32 /* This is a mandatory flock */
#define LOCK_READ 64 /* ... Which allows concurrent read operations */
#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
struct flock {
short l_type;
short l_whence;
off_t l_start;
off_t l_len;
pid_t l_pid;
};
#define F_LINUX_SPECIFIC_BASE 1024
#endif /* _ASM_IA64_FCNTL_H */
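A minimal, illustrative POSIX record-locking request built from the definitions above (purely an example; 0 for l_whence is SEEK_SET, normally taken from <unistd.h>):

struct flock fl = {
	.l_type   = F_WRLCK,	/* exclusive write lock              */
	.l_whence = 0,		/* SEEK_SET: offsets from file start */
	.l_start  = 0,
	.l_len    = 0,		/* zero length locks to end of file  */
};

/* fcntl(fd, F_SETLKW, &fl) would then block until the lock is granted. */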


@@ -0,0 +1,73 @@
#ifndef _ASM_IA64_FPSWA_H
#define _ASM_IA64_FPSWA_H
/*
* Floating-point Software Assist
*
* Copyright (C) 1999 Intel Corporation.
* Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
* Copyright (C) 1999 Goutham Rao <goutham.rao@intel.com>
*/
typedef struct {
/* 4 * 128 bits */
unsigned long fp_lp[4*2];
} fp_state_low_preserved_t;
typedef struct {
/* 10 * 128 bits */
unsigned long fp_lv[10 * 2];
} fp_state_low_volatile_t;
typedef struct {
/* 16 * 128 bits */
unsigned long fp_hp[16 * 2];
} fp_state_high_preserved_t;
typedef struct {
/* 96 * 128 bits */
unsigned long fp_hv[96 * 2];
} fp_state_high_volatile_t;
/**
* floating point state to be passed to the FP emulation library by
* the trap/fault handler
*/
typedef struct {
unsigned long bitmask_low64;
unsigned long bitmask_high64;
fp_state_low_preserved_t *fp_state_low_preserved;
fp_state_low_volatile_t *fp_state_low_volatile;
fp_state_high_preserved_t *fp_state_high_preserved;
fp_state_high_volatile_t *fp_state_high_volatile;
} fp_state_t;
typedef struct {
unsigned long status;
unsigned long err0;
unsigned long err1;
unsigned long err2;
} fpswa_ret_t;
/**
* function header for the Floating Point software assist
* library. This function is invoked by the Floating point software
* assist trap/fault handler.
*/
typedef fpswa_ret_t (*efi_fpswa_t) (unsigned long trap_type, void *bundle, unsigned long *ipsr,
unsigned long *fsr, unsigned long *isr, unsigned long *preds,
unsigned long *ifs, fp_state_t *fp_state);
/**
* This is the FPSWA library interface as defined by EFI. We need to pass a
* pointer to the interface itself on a call to the assist library
*/
typedef struct {
unsigned int revision;
unsigned int reserved;
efi_fpswa_t fpswa;
} fpswa_interface_t;
extern fpswa_interface_t *fpswa_interface;
#endif /* _ASM_IA64_FPSWA_H */


@@ -0,0 +1,66 @@
#ifndef _ASM_IA64_FPU_H
#define _ASM_IA64_FPU_H
/*
* Copyright (C) 1998, 1999, 2002, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm/types.h>
/* floating point status register: */
#define FPSR_TRAP_VD (1 << 0) /* invalid op trap disabled */
#define FPSR_TRAP_DD (1 << 1) /* denormal trap disabled */
#define FPSR_TRAP_ZD (1 << 2) /* zero-divide trap disabled */
#define FPSR_TRAP_OD (1 << 3) /* overflow trap disabled */
#define FPSR_TRAP_UD (1 << 4) /* underflow trap disabled */
#define FPSR_TRAP_ID (1 << 5) /* inexact trap disabled */
#define FPSR_S0(x) ((x) << 6)
#define FPSR_S1(x) ((x) << 19)
#define FPSR_S2(x) (__IA64_UL(x) << 32)
#define FPSR_S3(x) (__IA64_UL(x) << 45)
/* floating-point status field controls: */
#define FPSF_FTZ (1 << 0) /* flush-to-zero */
#define FPSF_WRE (1 << 1) /* widest-range exponent */
#define FPSF_PC(x) (((x) & 0x3) << 2) /* precision control */
#define FPSF_RC(x) (((x) & 0x3) << 4) /* rounding control */
#define FPSF_TD (1 << 6) /* trap disabled */
/* floating-point status field flags: */
#define FPSF_V (1 << 7) /* invalid operation flag */
#define FPSF_D (1 << 8) /* denormal/unnormal operand flag */
#define FPSF_Z (1 << 9) /* zero divide (IEEE) flag */
#define FPSF_O (1 << 10) /* overflow (IEEE) flag */
#define FPSF_U (1 << 11) /* underflow (IEEE) flag */
#define FPSF_I (1 << 12) /* inexact (IEEE) flag) */
/* floating-point rounding control: */
#define FPRC_NEAREST 0x0
#define FPRC_NEGINF 0x1
#define FPRC_POSINF 0x2
#define FPRC_TRUNC 0x3
#define FPSF_DEFAULT (FPSF_PC (0x3) | FPSF_RC (FPRC_NEAREST))
/* This default value is the same as HP-UX uses. Don't change it
without a very good reason. */
#define FPSR_DEFAULT (FPSR_TRAP_VD | FPSR_TRAP_DD | FPSR_TRAP_ZD \
| FPSR_TRAP_OD | FPSR_TRAP_UD | FPSR_TRAP_ID \
| FPSR_S0 (FPSF_DEFAULT) \
| FPSR_S1 (FPSF_DEFAULT | FPSF_TD | FPSF_WRE) \
| FPSR_S2 (FPSF_DEFAULT | FPSF_TD) \
| FPSR_S3 (FPSF_DEFAULT | FPSF_TD))
# ifndef __ASSEMBLY__
struct ia64_fpreg {
union {
unsigned long bits[2];
long double __dummy; /* force 16-byte alignment */
} u;
};
# endif /* __ASSEMBLY__ */
#endif /* _ASM_IA64_FPU_H */


@@ -0,0 +1,597 @@
#ifndef _ASM_IA64_GCC_INTRIN_H
#define _ASM_IA64_GCC_INTRIN_H
/*
*
* Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
* Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
*/
#include <linux/compiler.h>
/* define this macro to get some asm stmts included in 'c' files */
#define ASM_SUPPORTED
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define ia64_barrier() asm volatile ("":::"memory")
#define ia64_stop() asm volatile (";;"::)
#define ia64_invala_gr(regnum) asm volatile ("invala.e r%0" :: "i"(regnum))
#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum))
extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);
register unsigned long ia64_r13 asm ("r13") __attribute_used__;
#define ia64_setreg(regnum, val) \
({ \
switch (regnum) { \
case _IA64_REG_PSR_L: \
asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \
break; \
case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
asm volatile ("mov ar%0=%1" :: \
"i" (regnum - _IA64_REG_AR_KR0), \
"r"(val): "memory"); \
break; \
case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
asm volatile ("mov cr%0=%1" :: \
"i" (regnum - _IA64_REG_CR_DCR), \
"r"(val): "memory" ); \
break; \
case _IA64_REG_SP: \
asm volatile ("mov r12=%0" :: \
"r"(val): "memory"); \
break; \
case _IA64_REG_GP: \
asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \
break; \
default: \
ia64_bad_param_for_setreg(); \
break; \
} \
})
#define ia64_getreg(regnum) \
({ \
__u64 ia64_intri_res; \
\
switch (regnum) { \
case _IA64_REG_GP: \
asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \
break; \
case _IA64_REG_IP: \
asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \
break; \
case _IA64_REG_PSR: \
asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \
break; \
case _IA64_REG_TP: /* for current() */ \
ia64_intri_res = ia64_r13; \
break; \
case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
: "i"(regnum - _IA64_REG_AR_KR0)); \
break; \
case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \
: "i" (regnum - _IA64_REG_CR_DCR)); \
break; \
case _IA64_REG_SP: \
asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \
break; \
default: \
ia64_bad_param_for_getreg(); \
break; \
} \
ia64_intri_res; \
})
#define ia64_hint_pause 0
#define ia64_hint(mode) \
({ \
switch (mode) { \
case ia64_hint_pause: \
asm volatile ("hint @pause" ::: "memory"); \
break; \
} \
})
/* Integer values for mux1 instruction */
#define ia64_mux1_brcst 0
#define ia64_mux1_mix 8
#define ia64_mux1_shuf 9
#define ia64_mux1_alt 10
#define ia64_mux1_rev 11
#define ia64_mux1(x, mode) \
({ \
__u64 ia64_intri_res; \
\
switch (mode) { \
case ia64_mux1_brcst: \
asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \
break; \
case ia64_mux1_mix: \
asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \
break; \
case ia64_mux1_shuf: \
asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \
break; \
case ia64_mux1_alt: \
asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \
break; \
case ia64_mux1_rev: \
asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \
break; \
} \
ia64_intri_res; \
})
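/*
 * Illustrative sketch (added; not part of the original header): mux1 with
 * the @rev completer reverses the bytes of a 64-bit value, so it doubles as
 * a 64-bit byte swap.  The helper name is an assumption.
 */
static inline __u64
ia64_bswap64_example (__u64 x)
{
	return ia64_mux1(x, ia64_mux1_rev);	/* 0x0102030405060708 -> 0x0807060504030201 */
}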
#define ia64_popcnt(x) \
({ \
__u64 ia64_intri_res; \
asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
\
ia64_intri_res; \
})
#define ia64_getf_exp(x) \
({ \
long ia64_intri_res; \
\
asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
\
ia64_intri_res; \
})
#define ia64_shrp(a, b, count) \
({ \
__u64 ia64_intri_res; \
asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \
ia64_intri_res; \
})
#define ia64_ldfs(regnum, x) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x)); \
})
#define ia64_ldfd(regnum, x) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x)); \
})
#define ia64_ldfe(regnum, x) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x)); \
})
#define ia64_ldf8(regnum, x) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x)); \
})
#define ia64_ldf_fill(regnum, x) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \
})
#define ia64_stfs(x, regnum) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
#define ia64_stfd(x, regnum) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
#define ia64_stfe(x, regnum) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
#define ia64_stf8(x, regnum) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
#define ia64_stf_spill(x, regnum) \
({ \
register double __f__ asm ("f"#regnum); \
asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
#define ia64_fetchadd4_acq(p, inc) \
({ \
\
__u64 ia64_intri_res; \
asm volatile ("fetchadd4.acq %0=[%1],%2" \
: "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
: "memory"); \
\
ia64_intri_res; \
})
#define ia64_fetchadd4_rel(p, inc) \
({ \
__u64 ia64_intri_res; \
asm volatile ("fetchadd4.rel %0=[%1],%2" \
: "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
: "memory"); \
\
ia64_intri_res; \
})
#define ia64_fetchadd8_acq(p, inc) \
({ \
\
__u64 ia64_intri_res; \
asm volatile ("fetchadd8.acq %0=[%1],%2" \
: "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
: "memory"); \
\
ia64_intri_res; \
})
#define ia64_fetchadd8_rel(p, inc) \
({ \
__u64 ia64_intri_res; \
asm volatile ("fetchadd8.rel %0=[%1],%2" \
: "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
: "memory"); \
\
ia64_intri_res; \
})
#define ia64_xchg1(ptr,x) \
({ \
__u64 ia64_intri_res; \
asm volatile ("xchg1 %0=[%1],%2" \
: "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory"); \
ia64_intri_res; \
})
#define ia64_xchg2(ptr,x) \
({ \
__u64 ia64_intri_res; \
asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
ia64_intri_res; \
})
#define ia64_xchg4(ptr,x) \
({ \
__u64 ia64_intri_res; \
asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
ia64_intri_res; \
})
#define ia64_xchg8(ptr,x) \
({ \
__u64 ia64_intri_res; \
asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg1_acq(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg1_rel(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg2_acq(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg2_rel(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
\
asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg4_acq(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg4_rel(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg8_acq(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_cmpxchg8_rel(ptr, new, old) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
\
asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \
"=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
ia64_intri_res; \
})
#define ia64_mf() asm volatile ("mf" ::: "memory")
#define ia64_mfa() asm volatile ("mf.a" ::: "memory")
#define ia64_invala() asm volatile ("invala" ::: "memory")
#define ia64_thash(addr) \
({ \
__u64 ia64_intri_res; \
asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
ia64_intri_res; \
})
#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory")
#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory");
#ifdef HAVE_SERIALIZE_DIRECTIVE
# define ia64_dv_serialize_data() asm volatile (".serialize.data");
# define ia64_dv_serialize_instruction() asm volatile (".serialize.instruction");
#else
# define ia64_dv_serialize_data()
# define ia64_dv_serialize_instruction()
#endif
#define ia64_nop(x) asm volatile ("nop %0"::"i"(x));
#define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
#define ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
:: "r"(trnum), "r"(addr) : "memory")
#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
:: "r"(trnum), "r"(addr) : "memory")
#define ia64_tpa(addr) \
({ \
__u64 ia64_pa; \
asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
ia64_pa; \
})
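/*
 * Illustrative sketch (added; not part of the original header): ia64_tpa()
 * translates a mapped virtual address into its physical address, e.g. when
 * a physical pointer must be handed to firmware.  The name is an assumption.
 */
static inline __u64
ia64_tpa_example (void *p)
{
	return ia64_tpa((__u64) p);
}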
#define __ia64_set_dbr(index, val) \
asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
#define ia64_set_ibr(index, val) \
asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
#define ia64_set_pkr(index, val) \
asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
#define ia64_set_pmc(index, val) \
asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
#define ia64_set_pmd(index, val) \
asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
#define ia64_set_rr(index, val) \
asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
#define ia64_get_cpuid(index) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
ia64_intri_res; \
})
#define __ia64_get_dbr(index) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
ia64_intri_res; \
})
#define ia64_get_ibr(index) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
ia64_intri_res; \
})
#define ia64_get_pkr(index) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
ia64_intri_res; \
})
#define ia64_get_pmc(index) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
ia64_intri_res; \
})
#define ia64_get_pmd(index) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
ia64_intri_res; \
})
#define ia64_get_rr(index) \
({ \
__u64 ia64_intri_res; \
asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
ia64_intri_res; \
})
#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")
#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")
#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory")
#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory")
#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr))
#define ia64_ptcga(addr, size) \
do { \
asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \
ia64_dv_serialize_data(); \
} while (0)
#define ia64_ptcl(addr, size) \
do { \
asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \
ia64_dv_serialize_data(); \
} while (0)
#define ia64_ptri(addr, size) \
asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
#define ia64_ptrd(addr, size) \
asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
#define ia64_lfhint_none 0
#define ia64_lfhint_nt1 1
#define ia64_lfhint_nt2 2
#define ia64_lfhint_nta 3
#define ia64_lfetch(lfhint, y) \
({ \
switch (lfhint) { \
case ia64_lfhint_none: \
asm volatile ("lfetch [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nt1: \
asm volatile ("lfetch.nt1 [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nt2: \
asm volatile ("lfetch.nt2 [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nta: \
asm volatile ("lfetch.nta [%0]" : : "r"(y)); \
break; \
} \
})
#define ia64_lfetch_excl(lfhint, y) \
({ \
switch (lfhint) { \
case ia64_lfhint_none: \
asm volatile ("lfetch.excl [%0]" :: "r"(y)); \
break; \
case ia64_lfhint_nt1: \
asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \
break; \
case ia64_lfhint_nt2: \
asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \
break; \
case ia64_lfhint_nta: \
asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \
break; \
} \
})
#define ia64_lfetch_fault(lfhint, y) \
({ \
switch (lfhint) { \
case ia64_lfhint_none: \
asm volatile ("lfetch.fault [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nt1: \
asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nt2: \
asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y)); \
break; \
case ia64_lfhint_nta: \
asm volatile ("lfetch.fault.nta [%0]" : : "r"(y)); \
break; \
} \
})
#define ia64_lfetch_fault_excl(lfhint, y) \
({ \
switch (lfhint) { \
case ia64_lfhint_none: \
asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \
break; \
case ia64_lfhint_nt1: \
asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
break; \
case ia64_lfhint_nt2: \
asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
break; \
case ia64_lfhint_nta: \
asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
break; \
} \
})
#define ia64_intrin_local_irq_restore(x) \
do { \
asm volatile (";; cmp.ne p6,p7=%0,r0;;" \
"(p6) ssm psr.i;" \
"(p7) rsm psr.i;;" \
"(p6) srlz.d" \
:: "r"((x)) : "p6", "p7", "memory"); \
} while (0)
#endif /* _ASM_IA64_GCC_INTRIN_H */

View File

@@ -0,0 +1,37 @@
#ifndef _ASM_IA64_HARDIRQ_H
#define _ASM_IA64_HARDIRQ_H
/*
* Modified 1998-2002, 2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
#include <linux/threads.h>
#include <linux/irq.h>
#include <asm/processor.h>
/*
* No irq_cpustat_t for IA-64. The data is held in the per-CPU data structure.
*/
#define __ARCH_IRQ_STAT 1
#define softirq_pending(cpu) (cpu_data(cpu)->softirq_pending)
#define local_softirq_pending() (local_cpu_data->softirq_pending)
#define HARDIRQ_BITS 14
/*
* The hardirq mask has to be large enough to have space for potentially all IRQ sources
* in the system nesting on a single CPU:
*/
#if (1 << HARDIRQ_BITS) < NR_IRQS
# error HARDIRQ_BITS is too low!
#endif
extern void __iomem *ipi_base_addr;
#endif /* _ASM_IA64_HARDIRQ_H */

View File

@@ -0,0 +1,14 @@
/*
* linux/include/asm-ia64/hdreg.h
*
* Copyright (C) 1994-1996 Linus Torvalds & authors
*/
#warning this file is obsolete, please do not use it
#ifndef __ASM_IA64_HDREG_H
#define __ASM_IA64_HDREG_H
typedef unsigned short ide_ioreg_t;
#endif /* __ASM_IA64_HDREG_H */

View File

@@ -0,0 +1,154 @@
#ifndef _ASM_IA64_HW_IRQ_H
#define _ASM_IA64_HW_IRQ_H
/*
* Copyright (C) 2001-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/profile.h>
#include <asm/machvec.h>
#include <asm/ptrace.h>
#include <asm/smp.h>
typedef u8 ia64_vector;
/*
 * Interrupt vector usage:
 *
 *	0	special
 *	1, 3-14	reserved for firmware
 *	15	spurious interrupt (see IVR)
 *	16-255	vectored external interrupts, available for devices
 *		(16 is the lowest priority, 255 the highest);
 *		these form 15 classes of 16 vectors each.
*/
#define IA64_MIN_VECTORED_IRQ 16
#define IA64_MAX_VECTORED_IRQ 255
#define IA64_NUM_VECTORS 256
#define AUTO_ASSIGN -1
#define IA64_SPURIOUS_INT_VECTOR 0x0f
/*
* Vectors 0x10-0x1f are used for low priority interrupts, e.g. CMCI.
*/
#define IA64_CPEP_VECTOR 0x1c /* corrected platform error polling vector */
#define IA64_CMCP_VECTOR 0x1d /* corrected machine-check polling vector */
#define IA64_CPE_VECTOR 0x1e /* corrected platform error interrupt vector */
#define IA64_CMC_VECTOR 0x1f /* corrected machine-check interrupt vector */
/*
* Vectors 0x20-0x2f are reserved for legacy ISA IRQs.
*/
#define IA64_FIRST_DEVICE_VECTOR 0x30
#define IA64_LAST_DEVICE_VECTOR 0xe7
#define IA64_MCA_RENDEZ_VECTOR 0xe8 /* MCA rendez interrupt */
#define IA64_PERFMON_VECTOR 0xee /* performance monitor interrupt vector */
#define IA64_TIMER_VECTOR 0xef /* use highest-prio group 15 interrupt for timer */
#define IA64_MCA_WAKEUP_VECTOR 0xf0 /* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */
#define IA64_IPI_RESCHEDULE 0xfd /* SMP reschedule */
#define IA64_IPI_VECTOR 0xfe /* inter-processor interrupt vector */
/* Used for encoding redirected irqs */
#define IA64_IRQ_REDIRECTED (1 << 31)
/* IA64 inter-cpu interrupt related definitions */
#define IA64_IPI_DEFAULT_BASE_ADDR 0xfee00000
/* Delivery modes for inter-cpu interrupts */
enum {
IA64_IPI_DM_INT = 0x0, /* pend an external interrupt */
IA64_IPI_DM_PMI = 0x2, /* pend a PMI */
IA64_IPI_DM_NMI = 0x4, /* pend an NMI (vector 2) */
IA64_IPI_DM_INIT = 0x5, /* pend an INIT interrupt */
IA64_IPI_DM_EXTINT = 0x7, /* pend an 8259-compatible interrupt. */
};
extern __u8 isa_irq_to_vector_map[16];
#define isa_irq_to_vector(x) isa_irq_to_vector_map[(x)]
extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */
extern int assign_irq_vector (int irq); /* allocate a free vector */
extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
static inline void
hw_resend_irq (struct hw_interrupt_type *h, unsigned int vector)
{
platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
}
/*
* Default implementations for the irq-descriptor API:
*/
extern irq_desc_t _irq_desc[NR_IRQS];
#ifndef CONFIG_IA64_GENERIC
static inline irq_desc_t *
__ia64_irq_desc (unsigned int irq)
{
return _irq_desc + irq;
}
static inline ia64_vector
__ia64_irq_to_vector (unsigned int irq)
{
return (ia64_vector) irq;
}
static inline unsigned int
__ia64_local_vector_to_irq (ia64_vector vec)
{
return (unsigned int) vec;
}
#endif
/*
* Next follows the irq descriptor interface. On IA-64, each CPU supports 256 interrupt
* vectors. On smaller systems, there is a one-to-one correspondence between interrupt
* vectors and the Linux irq numbers. However, larger systems may have multiple interrupt
 * domains, meaning that the translation from vector number to irq number depends on the
* interrupt domain that a CPU belongs to. This API abstracts such platform-dependent
* differences and provides a uniform means to translate between vector and irq numbers
* and to obtain the irq descriptor for a given irq number.
*/
/* Return a pointer to the irq descriptor for IRQ. */
static inline irq_desc_t *
irq_descp (int irq)
{
return platform_irq_desc(irq);
}
/* Extract the IA-64 vector that corresponds to IRQ. */
static inline ia64_vector
irq_to_vector (int irq)
{
return platform_irq_to_vector(irq);
}
/*
* Convert the local IA-64 vector to the corresponding irq number. This translation is
* done in the context of the interrupt domain that the currently executing CPU belongs
* to.
*/
static inline unsigned int
local_vector_to_irq (ia64_vector vec)
{
return platform_local_vector_to_irq(vec);
}
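/*
 * Illustrative sketch (added; not part of the original header): a typical
 * use of the API above is to translate a vector delivered by the CPU back
 * to a Linux irq number and look up its descriptor.  The name is assumed.
 */
static inline irq_desc_t *
vector_to_irq_desc_example (ia64_vector vec)
{
	return irq_descp(local_vector_to_irq(vec));
}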
#endif /* _ASM_IA64_HW_IRQ_H */

View File

@@ -0,0 +1,38 @@
#ifndef _ASM_IA64_IA32_H
#define _ASM_IA64_IA32_H
#include <linux/config.h>
#include <asm/ptrace.h>
#include <asm/signal.h>
#define IA32_NR_syscalls 285 /* length of syscall table */
#define IA32_PAGE_SHIFT 12 /* 4KB pages */
#ifndef __ASSEMBLY__
# ifdef CONFIG_IA32_SUPPORT
extern void ia32_cpu_init (void);
extern void ia32_mem_init (void);
extern void ia32_gdt_init (void);
extern int ia32_exception (struct pt_regs *regs, unsigned long isr);
extern int ia32_intercept (struct pt_regs *regs, unsigned long isr);
extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs);
# endif /* CONFIG_IA32_SUPPORT */
/* Declare this unconditionally, so we don't get warnings for unreachable code. */
extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs);
#if PAGE_SHIFT > IA32_PAGE_SHIFT
extern int ia32_copy_partial_page_list (struct task_struct *, unsigned long);
extern void ia32_drop_partial_page_list (struct task_struct *);
#else
# define ia32_copy_partial_page_list(a1, a2) 0
# define ia32_drop_partial_page_list(a1) do { ; } while (0)
#endif
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_IA32_H */

View File

@@ -0,0 +1,100 @@
/*
* Copyright (C) 2002,2003 Intel Corp.
* Jun Nakajima <jun.nakajima@intel.com>
* Suresh Siddha <suresh.b.siddha@intel.com>
*/
#ifndef _ASM_IA64_IA64REGS_H
#define _ASM_IA64_IA64REGS_H
/*
* Register Names for getreg() and setreg().
*
* The "magic" numbers happen to match the values used by the Intel compiler's
* getreg()/setreg() intrinsics.
*/
/* Special Registers */
#define _IA64_REG_IP 1016 /* getreg only */
#define _IA64_REG_PSR 1019
#define _IA64_REG_PSR_L 1019
/* General Integer Registers */
#define _IA64_REG_GP 1025 /* R1 */
#define _IA64_REG_R8 1032 /* R8 */
#define _IA64_REG_R9 1033 /* R9 */
#define _IA64_REG_SP 1036 /* R12 */
#define _IA64_REG_TP 1037 /* R13 */
/* Application Registers */
#define _IA64_REG_AR_KR0 3072
#define _IA64_REG_AR_KR1 3073
#define _IA64_REG_AR_KR2 3074
#define _IA64_REG_AR_KR3 3075
#define _IA64_REG_AR_KR4 3076
#define _IA64_REG_AR_KR5 3077
#define _IA64_REG_AR_KR6 3078
#define _IA64_REG_AR_KR7 3079
#define _IA64_REG_AR_RSC 3088
#define _IA64_REG_AR_BSP 3089
#define _IA64_REG_AR_BSPSTORE 3090
#define _IA64_REG_AR_RNAT 3091
#define _IA64_REG_AR_FCR 3093
#define _IA64_REG_AR_EFLAG 3096
#define _IA64_REG_AR_CSD 3097
#define _IA64_REG_AR_SSD 3098
#define _IA64_REG_AR_CFLAG 3099
#define _IA64_REG_AR_FSR 3100
#define _IA64_REG_AR_FIR 3101
#define _IA64_REG_AR_FDR 3102
#define _IA64_REG_AR_CCV 3104
#define _IA64_REG_AR_UNAT 3108
#define _IA64_REG_AR_FPSR 3112
#define _IA64_REG_AR_ITC 3116
#define _IA64_REG_AR_PFS 3136
#define _IA64_REG_AR_LC 3137
#define _IA64_REG_AR_EC 3138
/* Control Registers */
#define _IA64_REG_CR_DCR 4096
#define _IA64_REG_CR_ITM 4097
#define _IA64_REG_CR_IVA 4098
#define _IA64_REG_CR_PTA 4104
#define _IA64_REG_CR_IPSR 4112
#define _IA64_REG_CR_ISR 4113
#define _IA64_REG_CR_IIP 4115
#define _IA64_REG_CR_IFA 4116
#define _IA64_REG_CR_ITIR 4117
#define _IA64_REG_CR_IIPA 4118
#define _IA64_REG_CR_IFS 4119
#define _IA64_REG_CR_IIM 4120
#define _IA64_REG_CR_IHA 4121
#define _IA64_REG_CR_LID 4160
#define _IA64_REG_CR_IVR 4161 /* getreg only */
#define _IA64_REG_CR_TPR 4162
#define _IA64_REG_CR_EOI 4163
#define _IA64_REG_CR_IRR0 4164 /* getreg only */
#define _IA64_REG_CR_IRR1 4165 /* getreg only */
#define _IA64_REG_CR_IRR2 4166 /* getreg only */
#define _IA64_REG_CR_IRR3 4167 /* getreg only */
#define _IA64_REG_CR_ITV 4168
#define _IA64_REG_CR_PMV 4169
#define _IA64_REG_CR_CMCV 4170
#define _IA64_REG_CR_LRR0 4176
#define _IA64_REG_CR_LRR1 4177
/* Indirect Registers for getindreg() and setindreg() */
#define _IA64_REG_INDR_CPUID 9000 /* getindreg only */
#define _IA64_REG_INDR_DBR 9001
#define _IA64_REG_INDR_IBR 9002
#define _IA64_REG_INDR_PKR 9003
#define _IA64_REG_INDR_PMC 9004
#define _IA64_REG_INDR_PMD 9005
#define _IA64_REG_INDR_RR 9006
#endif /* _ASM_IA64_IA64REGS_H */

View File

@@ -0,0 +1,71 @@
/*
* linux/include/asm-ia64/ide.h
*
* Copyright (C) 1994-1996 Linus Torvalds & authors
*/
/*
* This file contains the ia64 architecture specific IDE code.
*/
#ifndef __ASM_IA64_IDE_H
#define __ASM_IA64_IDE_H
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/irq.h>
#ifndef MAX_HWIFS
# ifdef CONFIG_PCI
#define MAX_HWIFS 10
# else
#define MAX_HWIFS 6
# endif
#endif
#define IDE_ARCH_OBSOLETE_DEFAULTS
static inline int ide_default_irq(unsigned long base)
{
switch (base) {
case 0x1f0: return isa_irq_to_vector(14);
case 0x170: return isa_irq_to_vector(15);
case 0x1e8: return isa_irq_to_vector(11);
case 0x168: return isa_irq_to_vector(10);
case 0x1e0: return isa_irq_to_vector(8);
case 0x160: return isa_irq_to_vector(12);
default:
return 0;
}
}
static inline unsigned long ide_default_io_base(int index)
{
switch (index) {
case 0: return 0x1f0;
case 1: return 0x170;
case 2: return 0x1e8;
case 3: return 0x168;
case 4: return 0x1e0;
case 5: return 0x160;
default:
return 0;
}
}
#define IDE_ARCH_OBSOLETE_INIT
#define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */
#ifdef CONFIG_PCI
#define ide_init_default_irq(base) (0)
#else
#define ide_init_default_irq(base) ide_default_irq(base)
#endif
#include <asm-generic/ide_iops.h>
#endif /* __KERNEL__ */
#endif /* __ASM_IA64_IDE_H */

View File

@@ -0,0 +1,257 @@
#ifndef _ASM_IA64_INTEL_INTRIN_H
#define _ASM_IA64_INTEL_INTRIN_H
/*
* Intel Compiler Intrinsics
*
* Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
* Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
*
*/
#include <asm/types.h>
void __lfetch(int lfhint, void *y);
void __lfetch_excl(int lfhint, void *y);
void __lfetch_fault(int lfhint, void *y);
void __lfetch_fault_excl(int lfhint, void *y);
/* In the following, whichFloatReg should be an integer from 0-127 */
void __ldfs(const int whichFloatReg, void *src);
void __ldfd(const int whichFloatReg, void *src);
void __ldfe(const int whichFloatReg, void *src);
void __ldf8(const int whichFloatReg, void *src);
void __ldf_fill(const int whichFloatReg, void *src);
void __stfs(void *dst, const int whichFloatReg);
void __stfd(void *dst, const int whichFloatReg);
void __stfe(void *dst, const int whichFloatReg);
void __stf8(void *dst, const int whichFloatReg);
void __stf_spill(void *dst, const int whichFloatReg);
void __st1_rel(void *dst, const __s8 value);
void __st2_rel(void *dst, const __s16 value);
void __st4_rel(void *dst, const __s32 value);
void __st8_rel(void *dst, const __s64 value);
__u8 __ld1_acq(void *src);
__u16 __ld2_acq(void *src);
__u32 __ld4_acq(void *src);
__u64 __ld8_acq(void *src);
__u64 __fetchadd4_acq(__u32 *addend, const int increment);
__u64 __fetchadd4_rel(__u32 *addend, const int increment);
__u64 __fetchadd8_acq(__u64 *addend, const int increment);
__u64 __fetchadd8_rel(__u64 *addend, const int increment);
__u64 __getf_exp(double d);
/* OS Related Itanium(R) Intrinsics */
/* The names to use for whichReg and whichIndReg below come from
the include file asm/ia64regs.h */
__u64 __getIndReg(const int whichIndReg, __s64 index);
__u64 __getReg(const int whichReg);
void __setIndReg(const int whichIndReg, __s64 index, __u64 value);
void __setReg(const int whichReg, __u64 value);
void __mf(void);
void __mfa(void);
void __synci(void);
void __itcd(__s64 pa);
void __itci(__s64 pa);
void __itrd(__s64 whichTransReg, __s64 pa);
void __itri(__s64 whichTransReg, __s64 pa);
void __ptce(__s64 va);
void __ptcl(__s64 va, __s64 pagesz);
void __ptcg(__s64 va, __s64 pagesz);
void __ptcga(__s64 va, __s64 pagesz);
void __ptri(__s64 va, __s64 pagesz);
void __ptrd(__s64 va, __s64 pagesz);
void __invala (void);
void __invala_gr(const int whichGeneralReg /* 0-127 */ );
void __invala_fr(const int whichFloatReg /* 0-127 */ );
void __nop(const int);
void __fc(__u64 *addr);
void __sum(int mask);
void __rum(int mask);
void __ssm(int mask);
void __rsm(int mask);
__u64 __thash(__s64);
__u64 __ttag(__s64);
__s64 __tpa(__s64);
/* Intrinsics for implementing get/put_user macros */
void __st_user(const char *tableName, __u64 addr, char size, char relocType, __u64 val);
void __ld_user(const char *tableName, __u64 addr, char size, char relocType);
/* This intrinsic does not generate code, it creates a barrier across which
* the compiler will not schedule data access instructions.
*/
void __memory_barrier(void);
void __isrlz(void);
void __dsrlz(void);
__u64 _m64_mux1(__u64 a, const int n);
/* Lock and Atomic Operation Related Intrinsics */
__u64 _InterlockedExchange8(volatile __u8 *trgt, __u8 value);
__u64 _InterlockedExchange16(volatile __u16 *trgt, __u16 value);
__s64 _InterlockedExchange(volatile __u32 *trgt, __u32 value);
__s64 _InterlockedExchange64(volatile __u64 *trgt, __u64 value);
__u64 _InterlockedCompareExchange8_rel(volatile __u8 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange8_acq(volatile __u8 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange16_rel(volatile __u16 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange16_acq(volatile __u16 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange_rel(volatile __u32 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange_acq(volatile __u32 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange64_rel(volatile __u64 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange64_acq(volatile __u64 *dest, __u64 xchg, __u64 comp);
__s64 _m64_dep_mi(const int v, __s64 s, const int p, const int len);
__s64 _m64_shrp(__s64 a, __s64 b, const int count);
__s64 _m64_popcnt(__s64 a);
#define ia64_barrier() __memory_barrier()
#define ia64_stop() /* Nothing: As of now stop bit is generated for each
* intrinsic
*/
#define ia64_getreg __getReg
#define ia64_setreg __setReg
#define ia64_hint(x)
#define ia64_mux1_brcst 0
#define ia64_mux1_mix 8
#define ia64_mux1_shuf 9
#define ia64_mux1_alt 10
#define ia64_mux1_rev 11
#define ia64_mux1 _m64_mux1
#define ia64_popcnt _m64_popcnt
#define ia64_getf_exp __getf_exp
#define ia64_shrp _m64_shrp
#define ia64_tpa __tpa
#define ia64_invala __invala
#define ia64_invala_gr __invala_gr
#define ia64_invala_fr __invala_fr
#define ia64_nop __nop
#define ia64_sum __sum
#define ia64_ssm __ssm
#define ia64_rum __rum
#define ia64_rsm __rsm
#define ia64_fc __fc
#define ia64_ldfs __ldfs
#define ia64_ldfd __ldfd
#define ia64_ldfe __ldfe
#define ia64_ldf8 __ldf8
#define ia64_ldf_fill __ldf_fill
#define ia64_stfs __stfs
#define ia64_stfd __stfd
#define ia64_stfe __stfe
#define ia64_stf8 __stf8
#define ia64_stf_spill __stf_spill
#define ia64_mf __mf
#define ia64_mfa __mfa
#define ia64_fetchadd4_acq __fetchadd4_acq
#define ia64_fetchadd4_rel __fetchadd4_rel
#define ia64_fetchadd8_acq __fetchadd8_acq
#define ia64_fetchadd8_rel __fetchadd8_rel
#define ia64_xchg1 _InterlockedExchange8
#define ia64_xchg2 _InterlockedExchange16
#define ia64_xchg4 _InterlockedExchange
#define ia64_xchg8 _InterlockedExchange64
#define ia64_cmpxchg1_rel _InterlockedCompareExchange8_rel
#define ia64_cmpxchg1_acq _InterlockedCompareExchange8_acq
#define ia64_cmpxchg2_rel _InterlockedCompareExchange16_rel
#define ia64_cmpxchg2_acq _InterlockedCompareExchange16_acq
#define ia64_cmpxchg4_rel _InterlockedCompareExchange_rel
#define ia64_cmpxchg4_acq _InterlockedCompareExchange_acq
#define ia64_cmpxchg8_rel _InterlockedCompareExchange64_rel
#define ia64_cmpxchg8_acq _InterlockedCompareExchange64_acq
#define __ia64_set_dbr(index, val) \
__setIndReg(_IA64_REG_INDR_DBR, index, val)
#define ia64_set_ibr(index, val) \
__setIndReg(_IA64_REG_INDR_IBR, index, val)
#define ia64_set_pkr(index, val) \
__setIndReg(_IA64_REG_INDR_PKR, index, val)
#define ia64_set_pmc(index, val) \
__setIndReg(_IA64_REG_INDR_PMC, index, val)
#define ia64_set_pmd(index, val) \
__setIndReg(_IA64_REG_INDR_PMD, index, val)
#define ia64_set_rr(index, val) \
__setIndReg(_IA64_REG_INDR_RR, index, val)
#define ia64_get_cpuid(index) __getIndReg(_IA64_REG_INDR_CPUID, index)
#define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index)
#define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index)
#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index)
#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index)
#define ia64_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index)
#define ia64_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index)
#define ia64_srlz_d __dsrlz
#define ia64_srlz_i __isrlz
#define ia64_dv_serialize_data()
#define ia64_dv_serialize_instruction()
#define ia64_st1_rel __st1_rel
#define ia64_st2_rel __st2_rel
#define ia64_st4_rel __st4_rel
#define ia64_st8_rel __st8_rel
#define ia64_ld1_acq __ld1_acq
#define ia64_ld2_acq __ld2_acq
#define ia64_ld4_acq __ld4_acq
#define ia64_ld8_acq __ld8_acq
#define ia64_sync_i __synci
#define ia64_thash __thash
#define ia64_ttag __ttag
#define ia64_itcd __itcd
#define ia64_itci __itci
#define ia64_itrd __itrd
#define ia64_itri __itri
#define ia64_ptce __ptce
#define ia64_ptcl __ptcl
#define ia64_ptcg __ptcg
#define ia64_ptcga __ptcga
#define ia64_ptri __ptri
#define ia64_ptrd __ptrd
#define ia64_dep_mi _m64_dep_mi
/* Values for lfhint in __lfetch and __lfetch_fault */
#define ia64_lfhint_none 0
#define ia64_lfhint_nt1 1
#define ia64_lfhint_nt2 2
#define ia64_lfhint_nta 3
#define ia64_lfetch __lfetch
#define ia64_lfetch_excl __lfetch_excl
#define ia64_lfetch_fault __lfetch_fault
#define ia64_lfetch_fault_excl __lfetch_fault_excl
#define ia64_intrin_local_irq_restore(x) \
do { \
if ((x) != 0) { \
ia64_ssm(IA64_PSR_I); \
ia64_srlz_d(); \
} else { \
ia64_rsm(IA64_PSR_I); \
} \
} while (0)
#endif /* _ASM_IA64_INTEL_INTRIN_H */

View File

@@ -0,0 +1,181 @@
#ifndef _ASM_IA64_INTRINSICS_H
#define _ASM_IA64_INTRINSICS_H
/*
* Compiler-dependent intrinsics.
*
* Copyright (C) 2002-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#ifndef __ASSEMBLY__
#include <linux/config.h>
/* include compiler specific intrinsics */
#include <asm/ia64regs.h>
#ifdef __INTEL_COMPILER
# include <asm/intel_intrin.h>
#else
# include <asm/gcc_intrin.h>
#endif
/*
* Force an unresolved reference if someone tries to use
* ia64_fetch_and_add() with a bad value.
*/
extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
#define IA64_FETCHADD(tmp,v,n,sz,sem) \
({ \
switch (sz) { \
case 4: \
tmp = ia64_fetchadd4_##sem((unsigned int *) v, n); \
break; \
\
case 8: \
tmp = ia64_fetchadd8_##sem((unsigned long *) v, n); \
break; \
\
default: \
__bad_size_for_ia64_fetch_and_add(); \
} \
})
#define ia64_fetchadd(i,v,sem) \
({ \
__u64 _tmp; \
volatile __typeof__(*(v)) *_v = (v); \
/* Can't use a switch () here: gcc isn't always smart enough for that... */ \
if ((i) == -16) \
IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem); \
else if ((i) == -8) \
IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem); \
else if ((i) == -4) \
IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem); \
else if ((i) == -1) \
IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem); \
else if ((i) == 1) \
IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem); \
else if ((i) == 4) \
IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem); \
else if ((i) == 8) \
IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem); \
else if ((i) == 16) \
IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem); \
else \
_tmp = __bad_increment_for_ia64_fetch_and_add(); \
(__typeof__(*(v))) (_tmp); /* return old value */ \
})
#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */
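/*
 * Illustrative sketch (added; not part of the original header): since
 * ia64_fetch_and_add() returns the new value, an atomic increment-and-return
 * of a 64-bit counter could look like this.  The name is an assumption.
 */
static inline long
atomic_inc_return_example (volatile long *counter)
{
	return ia64_fetch_and_add(1, counter);
}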
/*
* This function doesn't exist, so you'll get a linker error if
* something tries to do an invalid xchg().
*/
extern void ia64_xchg_called_with_bad_pointer (void);
#define __xchg(x,ptr,size) \
({ \
unsigned long __xchg_result; \
\
switch (size) { \
case 1: \
__xchg_result = ia64_xchg1((__u8 *)ptr, x); \
break; \
\
case 2: \
__xchg_result = ia64_xchg2((__u16 *)ptr, x); \
break; \
\
case 4: \
__xchg_result = ia64_xchg4((__u32 *)ptr, x); \
break; \
\
case 8: \
__xchg_result = ia64_xchg8((__u64 *)ptr, x); \
break; \
default: \
ia64_xchg_called_with_bad_pointer(); \
} \
__xchg_result; \
})
#define xchg(ptr,x) \
((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
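/*
 * Illustrative sketch (added; not part of the original header): xchg()
 * atomically stores a new value and returns the previous contents, e.g. a
 * crude test-and-set on a word.  The name is an assumption.
 */
static inline int
test_and_set_word_example (volatile int *word)
{
	return xchg(word, 1);		/* returns 0 iff this caller set the word */
}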
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
#define __HAVE_ARCH_CMPXCHG 1
/*
* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg().
*/
extern long ia64_cmpxchg_called_with_bad_pointer (void);
#define ia64_cmpxchg(sem,ptr,old,new,size) \
({ \
__u64 _o_, _r_; \
\
switch (size) { \
case 1: _o_ = (__u8 ) (long) (old); break; \
case 2: _o_ = (__u16) (long) (old); break; \
case 4: _o_ = (__u32) (long) (old); break; \
case 8: _o_ = (__u64) (long) (old); break; \
default: break; \
} \
switch (size) { \
case 1: \
_r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \
break; \
\
case 2: \
_r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \
break; \
\
case 4: \
_r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \
break; \
\
case 8: \
_r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \
break; \
\
default: \
_r_ = ia64_cmpxchg_called_with_bad_pointer(); \
break; \
} \
(__typeof__(old)) _r_; \
})
#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
/* for compatibility with other platforms: */
#define cmpxchg(ptr,o,n) cmpxchg_acq(ptr,o,n)
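/*
 * Illustrative sketch (added; not part of the original header): the
 * canonical compare-and-swap retry loop built on cmpxchg_acq(); success is
 * detected by comparing the return value with the expected old value.
 */
static inline void
atomic_or_example (volatile __u32 *word, __u32 bits)
{
	__u32 old, new;

	do {
		old = *word;
		new = old | bits;
	} while (cmpxchg_acq(word, old, new) != old);
}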
#ifdef CONFIG_IA64_DEBUG_CMPXCHG
# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
# define CMPXCHG_BUGCHECK(v) \
do { \
if (_cmpxchg_bugcheck_count-- <= 0) { \
void *ip; \
extern int printk(const char *fmt, ...); \
ip = (void *) ia64_getreg(_IA64_REG_IP); \
printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \
break; \
} \
} while (0)
#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
# define CMPXCHG_BUGCHECK_DECL
# define CMPXCHG_BUGCHECK(v)
#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
#endif
#endif /* _ASM_IA64_INTRINSICS_H */

View File

@@ -0,0 +1,489 @@
#ifndef _ASM_IA64_IO_H
#define _ASM_IA64_IO_H
/*
* This file contains the definitions for the emulated IO instructions
* inb/inw/inl/outb/outw/outl and the "string versions" of the same
* (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
* versions of the single-IO instructions (inb_p/inw_p/..).
*
* This file is not meant to be obfuscating: it's just complicated to
* (a) handle it all in a way that makes gcc able to optimize it as
* well as possible and (b) trying to avoid writing the same thing
* over and over again with slight variations and possibly making a
* mistake somewhere.
*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
* Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
*/
/* We don't use IO slowdowns on the ia64, but.. */
#define __SLOW_DOWN_IO do { } while (0)
#define SLOW_DOWN_IO do { } while (0)
#define __IA64_UNCACHED_OFFSET 0xc000000000000000UL /* region 6 */
/*
* The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
 * large machines may have multiple other I/O spaces, so we can't place any a priori limit
* on IO_SPACE_LIMIT. These additional spaces are described in ACPI.
*/
#define IO_SPACE_LIMIT 0xffffffffffffffffUL
#define MAX_IO_SPACES_BITS 4
#define MAX_IO_SPACES (1UL << MAX_IO_SPACES_BITS)
#define IO_SPACE_BITS 24
#define IO_SPACE_SIZE (1UL << IO_SPACE_BITS)
#define IO_SPACE_NR(port) ((port) >> IO_SPACE_BITS)
#define IO_SPACE_BASE(space) ((space) << IO_SPACE_BITS)
#define IO_SPACE_PORT(port) ((port) & (IO_SPACE_SIZE - 1))
#define IO_SPACE_SPARSE_ENCODING(p) ((((p) >> 2) << 12) | ((p) & 0xfff))
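/*
 * Worked example (added for illustration): with a sparse space, legacy port
 * 0x3f8 is encoded as
 *	IO_SPACE_SPARSE_ENCODING(0x3f8) = ((0x3f8 >> 2) << 12) | (0x3f8 & 0xfff)
 *					= 0xfe000 | 0x3f8 = 0xfe3f8
 * and this offset is then OR-ed into the space's mmio_base when the port is
 * converted to an MMIO address.
 */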
struct io_space {
unsigned long mmio_base; /* base in MMIO space */
int sparse;
};
extern struct io_space io_space[];
extern unsigned int num_io_spaces;
# ifdef __KERNEL__
/*
* All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
* 0xCxxxxxxxxxxxxxxx MMIO cookie (return from ioremap)
* 0x000000001SPPPPPP PIO cookie (S=space number, P..P=port)
*
* ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
* code that uses bare port numbers without the prerequisite pci_iomap().
*/
#define PIO_OFFSET (1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
#define PIO_MASK (PIO_OFFSET - 1)
#define PIO_RESERVED __IA64_UNCACHED_OFFSET
#define HAVE_ARCH_PIO_SIZE
#include <asm/intrinsics.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm-generic/iomap.h>
/*
* Change virtual addresses to physical addresses and vv.
*/
static inline unsigned long
virt_to_phys (volatile void *address)
{
return (unsigned long) address - PAGE_OFFSET;
}
static inline void*
phys_to_virt (unsigned long address)
{
return (void *) (address + PAGE_OFFSET);
}
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */
/*
* The following two macros are deprecated and scheduled for removal.
* Please use the PCI-DMA interface defined in <asm/pci.h> instead.
*/
#define bus_to_virt phys_to_virt
#define virt_to_bus virt_to_phys
#define page_to_bus page_to_phys
# endif /* __KERNEL__ */
/*
* Memory fence w/accept. This should never be used in code that is
* not IA-64 specific.
*/
#define __ia64_mf_a() ia64_mfa()
/**
* ___ia64_mmiowb - I/O write barrier
*
* Ensure ordering of I/O space writes. This will make sure that writes
* following the barrier will arrive after all previous writes. For most
* ia64 platforms, this is a simple 'mf.a' instruction.
*
* See Documentation/DocBook/deviceiobook.tmpl for more information.
*/
static inline void ___ia64_mmiowb(void)
{
ia64_mfa();
}
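/*
 * Illustrative usage sketch (added; names are assumptions, not part of the
 * original header): mmiowb() goes between the last MMIO write inside a
 * critical section and the unlock that lets another CPU in, e.g.
 *
 *	spin_lock(&dev_lock);
 *	writel(val, dev_regs + CTRL_REG);
 *	mmiowb();
 *	spin_unlock(&dev_lock);
 *
 * so writes from different CPUs cannot be reordered at the device.
 */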
static inline unsigned long
__ia64_get_io_port_base (void)
{
extern unsigned long ia64_iobase;
return ia64_iobase;
}
static inline void*
__ia64_mk_io_addr (unsigned long port)
{
struct io_space *space;
unsigned long offset;
space = &io_space[IO_SPACE_NR(port)];
port = IO_SPACE_PORT(port);
if (space->sparse)
offset = IO_SPACE_SPARSE_ENCODING(port);
else
offset = port;
return (void *) (space->mmio_base | offset);
}
#define __ia64_inb ___ia64_inb
#define __ia64_inw ___ia64_inw
#define __ia64_inl ___ia64_inl
#define __ia64_outb ___ia64_outb
#define __ia64_outw ___ia64_outw
#define __ia64_outl ___ia64_outl
#define __ia64_readb ___ia64_readb
#define __ia64_readw ___ia64_readw
#define __ia64_readl ___ia64_readl
#define __ia64_readq ___ia64_readq
#define __ia64_readb_relaxed ___ia64_readb
#define __ia64_readw_relaxed ___ia64_readw
#define __ia64_readl_relaxed ___ia64_readl
#define __ia64_readq_relaxed ___ia64_readq
#define __ia64_writeb ___ia64_writeb
#define __ia64_writew ___ia64_writew
#define __ia64_writel ___ia64_writel
#define __ia64_writeq ___ia64_writeq
#define __ia64_mmiowb ___ia64_mmiowb
/*
* For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
* that the access has completed before executing other I/O accesses. Since we're doing
* the accesses through an uncachable (UC) translation, the CPU will execute them in
* program order. However, we still need to tell the compiler not to shuffle them around
* during optimization, which is why we use "volatile" pointers.
*/
static inline unsigned int
___ia64_inb (unsigned long port)
{
volatile unsigned char *addr = __ia64_mk_io_addr(port);
unsigned char ret;
ret = *addr;
__ia64_mf_a();
return ret;
}
static inline unsigned int
___ia64_inw (unsigned long port)
{
volatile unsigned short *addr = __ia64_mk_io_addr(port);
unsigned short ret;
ret = *addr;
__ia64_mf_a();
return ret;
}
static inline unsigned int
___ia64_inl (unsigned long port)
{
volatile unsigned int *addr = __ia64_mk_io_addr(port);
unsigned int ret;
ret = *addr;
__ia64_mf_a();
return ret;
}
static inline void
___ia64_outb (unsigned char val, unsigned long port)
{
volatile unsigned char *addr = __ia64_mk_io_addr(port);
*addr = val;
__ia64_mf_a();
}
static inline void
___ia64_outw (unsigned short val, unsigned long port)
{
volatile unsigned short *addr = __ia64_mk_io_addr(port);
*addr = val;
__ia64_mf_a();
}
static inline void
___ia64_outl (unsigned int val, unsigned long port)
{
volatile unsigned int *addr = __ia64_mk_io_addr(port);
*addr = val;
__ia64_mf_a();
}
static inline void
__insb (unsigned long port, void *dst, unsigned long count)
{
unsigned char *dp = dst;
while (count--)
*dp++ = platform_inb(port);
}
static inline void
__insw (unsigned long port, void *dst, unsigned long count)
{
unsigned short *dp = dst;
while (count--)
*dp++ = platform_inw(port);
}
static inline void
__insl (unsigned long port, void *dst, unsigned long count)
{
unsigned int *dp = dst;
while (count--)
*dp++ = platform_inl(port);
}
static inline void
__outsb (unsigned long port, const void *src, unsigned long count)
{
const unsigned char *sp = src;
while (count--)
platform_outb(*sp++, port);
}
static inline void
__outsw (unsigned long port, const void *src, unsigned long count)
{
const unsigned short *sp = src;
while (count--)
platform_outw(*sp++, port);
}
static inline void
__outsl (unsigned long port, const void *src, unsigned long count)
{
const unsigned int *sp = src;
while (count--)
platform_outl(*sp++, port);
}
/*
* Unfortunately, some platforms are broken and do not follow the IA-64 architecture
* specification regarding legacy I/O support. Thus, we have to make these operations
* platform dependent...
*/
#define __inb platform_inb
#define __inw platform_inw
#define __inl platform_inl
#define __outb platform_outb
#define __outw platform_outw
#define __outl platform_outl
#define __mmiowb platform_mmiowb
#define inb(p) __inb(p)
#define inw(p) __inw(p)
#define inl(p) __inl(p)
#define insb(p,d,c) __insb(p,d,c)
#define insw(p,d,c) __insw(p,d,c)
#define insl(p,d,c) __insl(p,d,c)
#define outb(v,p) __outb(v,p)
#define outw(v,p) __outw(v,p)
#define outl(v,p) __outl(v,p)
#define outsb(p,s,c) __outsb(p,s,c)
#define outsw(p,s,c) __outsw(p,s,c)
#define outsl(p,s,c) __outsl(p,s,c)
#define mmiowb() __mmiowb()
/*
* The address passed to these functions are ioremap()ped already.
*
* We need these to be machine vectors since some platforms don't provide
* DMA coherence via PIO reads (PCI drivers and the spec imply that this is
* a good idea). Writes are ok though for all existing ia64 platforms (and
* hopefully it'll stay that way).
*/
static inline unsigned char
___ia64_readb (const volatile void __iomem *addr)
{
return *(volatile unsigned char __force *)addr;
}
static inline unsigned short
___ia64_readw (const volatile void __iomem *addr)
{
return *(volatile unsigned short __force *)addr;
}
static inline unsigned int
___ia64_readl (const volatile void __iomem *addr)
{
return *(volatile unsigned int __force *) addr;
}
static inline unsigned long
___ia64_readq (const volatile void __iomem *addr)
{
return *(volatile unsigned long __force *) addr;
}
static inline void
__writeb (unsigned char val, volatile void __iomem *addr)
{
*(volatile unsigned char __force *) addr = val;
}
static inline void
__writew (unsigned short val, volatile void __iomem *addr)
{
*(volatile unsigned short __force *) addr = val;
}
static inline void
__writel (unsigned int val, volatile void __iomem *addr)
{
*(volatile unsigned int __force *) addr = val;
}
static inline void
__writeq (unsigned long val, volatile void __iomem *addr)
{
*(volatile unsigned long __force *) addr = val;
}
#define __readb platform_readb
#define __readw platform_readw
#define __readl platform_readl
#define __readq platform_readq
#define __readb_relaxed platform_readb_relaxed
#define __readw_relaxed platform_readw_relaxed
#define __readl_relaxed platform_readl_relaxed
#define __readq_relaxed platform_readq_relaxed
#define readb(a) __readb((a))
#define readw(a) __readw((a))
#define readl(a) __readl((a))
#define readq(a) __readq((a))
#define readb_relaxed(a) __readb_relaxed((a))
#define readw_relaxed(a) __readw_relaxed((a))
#define readl_relaxed(a) __readl_relaxed((a))
#define readq_relaxed(a) __readq_relaxed((a))
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
#define __raw_readq readq
#define __raw_readb_relaxed readb_relaxed
#define __raw_readw_relaxed readw_relaxed
#define __raw_readl_relaxed readl_relaxed
#define __raw_readq_relaxed readq_relaxed
#define writeb(v,a) __writeb((v), (a))
#define writew(v,a) __writew((v), (a))
#define writel(v,a) __writel((v), (a))
#define writeq(v,a) __writeq((v), (a))
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
#define __raw_writeq writeq
#ifndef inb_p
# define inb_p inb
#endif
#ifndef inw_p
# define inw_p inw
#endif
#ifndef inl_p
# define inl_p inl
#endif
#ifndef outb_p
# define outb_p outb
#endif
#ifndef outw_p
# define outw_p outw
#endif
#ifndef outl_p
# define outl_p outl
#endif
/*
* An "address" in IO memory space is not clearly either an integer or a pointer. We will
* accept both, thus the casts.
*
* On ia-64, we access the physical I/O memory space through the uncached kernel region.
*/
static inline void __iomem *
ioremap (unsigned long offset, unsigned long size)
{
return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset));
}
static inline void
iounmap (volatile void __iomem *addr)
{
}
#define ioremap_nocache(o,s) ioremap(o,s)
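/*
 * Illustrative sketch (added; not part of the original header): on ia64,
 * ioremap() just forms an uncached region-6 address, so mapping and reading
 * a device register could look like this.  The physical address and the
 * register offset are made-up values.
 */
static inline unsigned int
read_device_reg_example (void)
{
	void __iomem *regs = ioremap(0xf8000000UL, 0x1000);
	unsigned int val = readl(regs + 0x10);

	iounmap(regs);
	return val;
}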
# ifdef __KERNEL__
/*
* String version of IO memory access ops:
*/
extern void __ia64_memcpy_fromio (void *, volatile void __iomem *, long);
extern void __ia64_memcpy_toio (volatile void __iomem *, void *, long);
extern void __ia64_memset_c_io (volatile void __iomem *, unsigned long, long);
#define memcpy_fromio(to,from,len) __ia64_memcpy_fromio((to), (from),(len))
#define memcpy_toio(to,from,len) __ia64_memcpy_toio((to),(from),(len))
#define memset_io(addr,c,len) __ia64_memset_c_io((addr), 0x0101010101010101UL*(u8)(c), \
(len))
#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)
# endif /* __KERNEL__ */
/*
* Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing. It is said that
* BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
* On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
* SPECweb-like workloads on zx1-based machines. Thus, for now we favor I/O MMU bypassing
* over BIO-level virtual merging.
*/
extern unsigned long ia64_max_iommu_merge_mask;
#if 1
#define BIO_VMERGE_BOUNDARY 0
#else
/*
* It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here. Should be
* replaced by dma_merge_mask() or something of that sort. Note: the only way
* BIO_VMERGE_BOUNDARY is used is to mask off bits. Effectively, our definition gets
* expanded into:
*
 * addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_merge_mask)
*
* which is precisely what we want.
*/
#define BIO_VMERGE_BOUNDARY (ia64_max_iommu_merge_mask + 1)
#endif
#endif /* _ASM_IA64_IO_H */

View File

@@ -0,0 +1,77 @@
#ifndef _ASM_IA64_IOCTL_H
#define _ASM_IA64_IOCTL_H
/*
* Based on <asm-i386/ioctl.h>.
*
* Modified 1998, 1999
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
/* ioctl command encoding: 32 bits total, command in lower 16 bits,
* size of the parameter structure in the lower 14 bits of the
* upper 16 bits.
* Encoding the size of the parameter structure in the ioctl request
* is useful for catching programs compiled with old versions
* and to avoid overwriting user space outside the user buffer area.
* The highest 2 bits are reserved for indicating the ``access mode''.
 * NOTE: This limits the max parameter size to 16kB - 1!
*/
/*
* The following is for compatibility across the various Linux
* platforms. The ia64 ioctl numbering scheme doesn't really enforce
* a type field. De facto, however, the top 8 bits of the lower 16
* bits are indeed used as a type field, so we might just as well make
* this explicit here. Please be sure to use the decoding macros
* below from now on.
*/
#define _IOC_NRBITS 8
#define _IOC_TYPEBITS 8
#define _IOC_SIZEBITS 14
#define _IOC_DIRBITS 2
#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
#define _IOC_NRSHIFT 0
#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
/*
* Direction bits.
*/
#define _IOC_NONE 0U
#define _IOC_WRITE 1U
#define _IOC_READ 2U
#define _IOC(dir,type,nr,size) \
(((dir) << _IOC_DIRSHIFT) | \
((type) << _IOC_TYPESHIFT) | \
((nr) << _IOC_NRSHIFT) | \
((size) << _IOC_SIZESHIFT))
/* used to create numbers */
#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
/* used to decode ioctl numbers.. */
#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
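/*
 * Worked example (added for illustration): _IOW('T', 0x31, int) encodes to
 *	(_IOC_WRITE << 30) | (sizeof(int) << 16) | ('T' << 8) | 0x31
 *	= 0x40000000 | 0x00040000 | 0x00005400 | 0x31 = 0x40045431
 * and the _IOC_DIR/_IOC_TYPE/_IOC_NR/_IOC_SIZE macros above recover the
 * four fields from that number.
 */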
/* ...and for the drivers/sound files... */
#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
#endif /* _ASM_IA64_IOCTL_H */

View File

@@ -0,0 +1 @@
#include <linux/ioctl32.h>

View File

@@ -0,0 +1,89 @@
#ifndef _ASM_IA64_IOCTLS_H
#define _ASM_IA64_IOCTLS_H
/*
* Based on <asm-i386/ioctls.h>
*
* Modified 1998, 1999, 2002
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
#include <asm/ioctl.h>
/* 0x54 is just a magic number to make these relatively unique ('T') */
#define TCGETS 0x5401
#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
#define TCSETSW 0x5403
#define TCSETSF 0x5404
#define TCGETA 0x5405
#define TCSETA 0x5406
#define TCSETAW 0x5407
#define TCSETAF 0x5408
#define TCSBRK 0x5409
#define TCXONC 0x540A
#define TCFLSH 0x540B
#define TIOCEXCL 0x540C
#define TIOCNXCL 0x540D
#define TIOCSCTTY 0x540E
#define TIOCGPGRP 0x540F
#define TIOCSPGRP 0x5410
#define TIOCOUTQ 0x5411
#define TIOCSTI 0x5412
#define TIOCGWINSZ 0x5413
#define TIOCSWINSZ 0x5414
#define TIOCMGET 0x5415
#define TIOCMBIS 0x5416
#define TIOCMBIC 0x5417
#define TIOCMSET 0x5418
#define TIOCGSOFTCAR 0x5419
#define TIOCSSOFTCAR 0x541A
#define FIONREAD 0x541B
#define TIOCINQ FIONREAD
#define TIOCLINUX 0x541C
#define TIOCCONS 0x541D
#define TIOCGSERIAL 0x541E
#define TIOCSSERIAL 0x541F
#define TIOCPKT 0x5420
#define FIONBIO 0x5421
#define TIOCNOTTY 0x5422
#define TIOCSETD 0x5423
#define TIOCGETD 0x5424
#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
#define TIOCSBRK 0x5427 /* BSD compatibility */
#define TIOCCBRK 0x5428 /* BSD compatibility */
#define TIOCGSID 0x5429 /* Return the session ID of FD */
#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
#define FIOCLEX 0x5451
#define FIOASYNC 0x5452
#define TIOCSERCONFIG 0x5453
#define TIOCSERGWILD 0x5454
#define TIOCSERSWILD 0x5455
#define TIOCGLCKTRMIOS 0x5456
#define TIOCSLCKTRMIOS 0x5457
#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
#define TIOCSERGETLSR 0x5459 /* Get line status register */
#define TIOCSERGETMULTI 0x545A /* Get multiport config */
#define TIOCSERSETMULTI 0x545B /* Set multiport config */
#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
#define FIOQSIZE 0x5460
/* Used for packet mode */
#define TIOCPKT_DATA 0
#define TIOCPKT_FLUSHREAD 1
#define TIOCPKT_FLUSHWRITE 2
#define TIOCPKT_STOP 4
#define TIOCPKT_START 8
#define TIOCPKT_NOSTOP 16
#define TIOCPKT_DOSTOP 32
#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
#endif /* _ASM_IA64_IOCTLS_H */

View File

@@ -0,0 +1,106 @@
#ifndef __ASM_IA64_IOSAPIC_H
#define __ASM_IA64_IOSAPIC_H
#define IOSAPIC_REG_SELECT 0x0
#define IOSAPIC_WINDOW 0x10
#define IOSAPIC_EOI 0x40
#define IOSAPIC_VERSION 0x1
/*
* Redirection table entry
*/
#define IOSAPIC_RTE_LOW(i) (0x10+i*2)
#define IOSAPIC_RTE_HIGH(i) (0x11+i*2)
#define IOSAPIC_DEST_SHIFT 16
/*
* Delivery mode
*/
#define IOSAPIC_DELIVERY_SHIFT 8
#define IOSAPIC_FIXED 0x0
#define IOSAPIC_LOWEST_PRIORITY 0x1
#define IOSAPIC_PMI 0x2
#define IOSAPIC_NMI 0x4
#define IOSAPIC_INIT 0x5
#define IOSAPIC_EXTINT 0x7
/*
* Interrupt polarity
*/
#define IOSAPIC_POLARITY_SHIFT 13
#define IOSAPIC_POL_HIGH 0
#define IOSAPIC_POL_LOW 1
/*
* Trigger mode
*/
#define IOSAPIC_TRIGGER_SHIFT 15
#define IOSAPIC_EDGE 0
#define IOSAPIC_LEVEL 1
/*
* Mask bit
*/
#define IOSAPIC_MASK_SHIFT 16
#define IOSAPIC_MASK (1<<IOSAPIC_MASK_SHIFT)
#ifndef __ASSEMBLY__
#ifdef CONFIG_IOSAPIC
#define NR_IOSAPICS 256
static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg)
{
writel(reg, iosapic + IOSAPIC_REG_SELECT);
return readl(iosapic + IOSAPIC_WINDOW);
}
static inline void iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
{
writel(reg, iosapic + IOSAPIC_REG_SELECT);
writel(val, iosapic + IOSAPIC_WINDOW);
}
static inline void iosapic_eoi(char __iomem *iosapic, u32 vector)
{
writel(vector, iosapic + IOSAPIC_EOI);
}
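/*
 * Illustrative sketch only (the real programming lives in the iosapic core code):
 * a redirection table entry is composed from the vector, delivery mode, polarity
 * and trigger bits defined above, with the destination in the high word.  The
 * helper name and its arguments below are hypothetical.
 *
 *	static inline void example_program_rte (char __iomem *addr, unsigned int rte_index,
 *						u32 dest, u8 vector, int level, int low_active)
 *	{
 *		u32 low32, high32;
 *
 *		low32 = vector |
 *			(IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT) |
 *			((low_active ? IOSAPIC_POL_LOW : IOSAPIC_POL_HIGH) << IOSAPIC_POLARITY_SHIFT) |
 *			((level ? IOSAPIC_LEVEL : IOSAPIC_EDGE) << IOSAPIC_TRIGGER_SHIFT);
 *		high32 = dest << IOSAPIC_DEST_SHIFT;
 *
 *		iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);	// destination first
 *		iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
 *	}
 */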
extern void __init iosapic_system_init (int pcat_compat);
extern void __init iosapic_init (unsigned long address,
unsigned int gsi_base);
extern int gsi_to_vector (unsigned int gsi);
extern int gsi_to_irq (unsigned int gsi);
extern void iosapic_enable_intr (unsigned int vector);
extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity,
unsigned long trigger);
extern void __init iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
unsigned long polarity,
unsigned long trigger);
extern int __init iosapic_register_platform_intr (u32 int_type,
unsigned int gsi,
int pmi_vector,
u16 eid, u16 id,
unsigned long polarity,
unsigned long trigger);
extern unsigned int iosapic_version (char __iomem *addr);
extern void iosapic_pci_fixup (int);
#ifdef CONFIG_NUMA
extern void __init map_iosapic_to_node (unsigned int, int);
#endif
#else
#define iosapic_system_init(pcat_compat) do { } while (0)
#define iosapic_init(address,gsi_base) do { } while (0)
#define iosapic_register_intr(gsi,polarity,trigger) (gsi)
#define iosapic_override_isa_irq(isa_irq,gsi,polarity,trigger) do { } while (0)
#define iosapic_register_platform_intr(type,gsi,pmi,eid,id, \
polarity,trigger) (gsi)
#endif
# endif /* !__ASSEMBLY__ */
#endif /* __ASM_IA64_IOSAPIC_H */

View File

@@ -0,0 +1,28 @@
#ifndef _ASM_IA64_IPCBUF_H
#define _ASM_IA64_IPCBUF_H
/*
* The ipc64_perm structure for IA-64 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 32-bit seq
* - 2 miscellaneous 64-bit values
*/
struct ipc64_perm
{
__kernel_key_t key;
__kernel_uid_t uid;
__kernel_gid_t gid;
__kernel_uid_t cuid;
__kernel_gid_t cgid;
__kernel_mode_t mode;
unsigned short seq;
unsigned short __pad1;
unsigned long __unused1;
unsigned long __unused2;
};
#endif /* _ASM_IA64_IPCBUF_H */

View File

@@ -0,0 +1,43 @@
#ifndef _ASM_IA64_IRQ_H
#define _ASM_IA64_IRQ_H
/*
* Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
*
* 11/24/98 S.Eranian updated TIMER_IRQ and irq_canonicalize
* 01/20/99 S.Eranian added keyboard interrupt
* 02/29/00 D.Mosberger moved most things into hw_irq.h
*/
#define NR_IRQS 256
#define NR_IRQ_VECTORS NR_IRQS
static __inline__ int
irq_canonicalize (int irq)
{
/*
* We do the legacy thing here of pretending that irqs < 16
* are 8259 irqs. This really shouldn't be necessary at all,
* but we keep it here as serial.c still uses it...
*/
return ((irq == 2) ? 9 : irq);
}
extern void disable_irq (unsigned int);
extern void disable_irq_nosync (unsigned int);
extern void enable_irq (unsigned int);
extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
#ifdef CONFIG_SMP
extern void move_irq(int irq);
#else
#define move_irq(irq)
#endif
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif /* _ASM_IA64_IRQ_H */

View File

@@ -0,0 +1,31 @@
#ifndef _ASM_IA64_KMAP_TYPES_H
#define _ASM_IA64_KMAP_TYPES_H
#include <linux/config.h>
#ifdef CONFIG_DEBUG_HIGHMEM
# define D(n) __KM_FENCE_##n ,
#else
# define D(n)
#endif
enum km_type {
D(0) KM_BOUNCE_READ,
D(1) KM_SKB_SUNRPC_DATA,
D(2) KM_SKB_DATA_SOFTIRQ,
D(3) KM_USER0,
D(4) KM_USER1,
D(5) KM_BIO_SRC_IRQ,
D(6) KM_BIO_DST_IRQ,
D(7) KM_PTE0,
D(8) KM_PTE1,
D(9) KM_IRQ0,
D(10) KM_IRQ1,
D(11) KM_SOFTIRQ0,
D(12) KM_SOFTIRQ1,
D(13) KM_TYPE_NR
};
#undef D
#endif /* _ASM_IA64_KMAP_TYPES_H */

View File

@@ -0,0 +1,162 @@
#ifndef _ASM_IA64_KREGS_H
#define _ASM_IA64_KREGS_H
/*
* Copyright (C) 2001-2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
/*
* This file defines the kernel register usage convention used by Linux/ia64.
*/
/*
* Kernel registers:
*/
#define IA64_KR_IO_BASE 0 /* ar.k0: legacy I/O base address */
#define IA64_KR_TSSD 1 /* ar.k1: IVE uses this as the TSSD */
#define IA64_KR_CURRENT_STACK 4 /* ar.k4: what's mapped in IA64_TR_CURRENT_STACK */
#define IA64_KR_FPU_OWNER 5 /* ar.k5: fpu-owner (UP only, at the moment) */
#define IA64_KR_CURRENT 6 /* ar.k6: "current" task pointer */
#define IA64_KR_PT_BASE 7 /* ar.k7: page table base address (physical) */
#define _IA64_KR_PASTE(x,y) x##y
#define _IA64_KR_PREFIX(n) _IA64_KR_PASTE(ar.k, n)
#define IA64_KR(n) _IA64_KR_PREFIX(IA64_KR_##n)
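/*
 * Worked example of the two-level paste above: IA64_KR(CURRENT) expands to
 * _IA64_KR_PREFIX(6), i.e. _IA64_KR_PASTE(ar.k, 6), i.e. the register name
 * ar.k6, so assembly code can say e.g.
 *
 *	mov r13 = IA64_KR(CURRENT)	// load the "current" task pointer
 */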
/*
* Translation registers:
*/
#define IA64_TR_KERNEL 0 /* itr0, dtr0: maps kernel image (code & data) */
#define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */
#define IA64_TR_PERCPU_DATA 1 /* dtr1: percpu data */
#define IA64_TR_CURRENT_STACK 2 /* dtr2: maps kernel's memory- & register-stacks */
/* Processor status register bits: */
#define IA64_PSR_BE_BIT 1
#define IA64_PSR_UP_BIT 2
#define IA64_PSR_AC_BIT 3
#define IA64_PSR_MFL_BIT 4
#define IA64_PSR_MFH_BIT 5
#define IA64_PSR_IC_BIT 13
#define IA64_PSR_I_BIT 14
#define IA64_PSR_PK_BIT 15
#define IA64_PSR_DT_BIT 17
#define IA64_PSR_DFL_BIT 18
#define IA64_PSR_DFH_BIT 19
#define IA64_PSR_SP_BIT 20
#define IA64_PSR_PP_BIT 21
#define IA64_PSR_DI_BIT 22
#define IA64_PSR_SI_BIT 23
#define IA64_PSR_DB_BIT 24
#define IA64_PSR_LP_BIT 25
#define IA64_PSR_TB_BIT 26
#define IA64_PSR_RT_BIT 27
/* The following are not affected by save_flags()/restore_flags(): */
#define IA64_PSR_CPL0_BIT 32
#define IA64_PSR_CPL1_BIT 33
#define IA64_PSR_IS_BIT 34
#define IA64_PSR_MC_BIT 35
#define IA64_PSR_IT_BIT 36
#define IA64_PSR_ID_BIT 37
#define IA64_PSR_DA_BIT 38
#define IA64_PSR_DD_BIT 39
#define IA64_PSR_SS_BIT 40
#define IA64_PSR_RI_BIT 41
#define IA64_PSR_ED_BIT 43
#define IA64_PSR_BN_BIT 44
#define IA64_PSR_IA_BIT 45
/* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
execve(). Only list flags here that need to be cleared/set for BOTH clone2() and
execve(). */
#define IA64_PSR_BITS_TO_CLEAR (IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \
IA64_PSR_TB | IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA)
#define IA64_PSR_BITS_TO_SET (IA64_PSR_DFH | IA64_PSR_SP)
#define IA64_PSR_BE (__IA64_UL(1) << IA64_PSR_BE_BIT)
#define IA64_PSR_UP (__IA64_UL(1) << IA64_PSR_UP_BIT)
#define IA64_PSR_AC (__IA64_UL(1) << IA64_PSR_AC_BIT)
#define IA64_PSR_MFL (__IA64_UL(1) << IA64_PSR_MFL_BIT)
#define IA64_PSR_MFH (__IA64_UL(1) << IA64_PSR_MFH_BIT)
#define IA64_PSR_IC (__IA64_UL(1) << IA64_PSR_IC_BIT)
#define IA64_PSR_I (__IA64_UL(1) << IA64_PSR_I_BIT)
#define IA64_PSR_PK (__IA64_UL(1) << IA64_PSR_PK_BIT)
#define IA64_PSR_DT (__IA64_UL(1) << IA64_PSR_DT_BIT)
#define IA64_PSR_DFL (__IA64_UL(1) << IA64_PSR_DFL_BIT)
#define IA64_PSR_DFH (__IA64_UL(1) << IA64_PSR_DFH_BIT)
#define IA64_PSR_SP (__IA64_UL(1) << IA64_PSR_SP_BIT)
#define IA64_PSR_PP (__IA64_UL(1) << IA64_PSR_PP_BIT)
#define IA64_PSR_DI (__IA64_UL(1) << IA64_PSR_DI_BIT)
#define IA64_PSR_SI (__IA64_UL(1) << IA64_PSR_SI_BIT)
#define IA64_PSR_DB (__IA64_UL(1) << IA64_PSR_DB_BIT)
#define IA64_PSR_LP (__IA64_UL(1) << IA64_PSR_LP_BIT)
#define IA64_PSR_TB (__IA64_UL(1) << IA64_PSR_TB_BIT)
#define IA64_PSR_RT (__IA64_UL(1) << IA64_PSR_RT_BIT)
/* The following are not affected by save_flags()/restore_flags(): */
#define IA64_PSR_CPL (__IA64_UL(3) << IA64_PSR_CPL0_BIT)
#define IA64_PSR_IS (__IA64_UL(1) << IA64_PSR_IS_BIT)
#define IA64_PSR_MC (__IA64_UL(1) << IA64_PSR_MC_BIT)
#define IA64_PSR_IT (__IA64_UL(1) << IA64_PSR_IT_BIT)
#define IA64_PSR_ID (__IA64_UL(1) << IA64_PSR_ID_BIT)
#define IA64_PSR_DA (__IA64_UL(1) << IA64_PSR_DA_BIT)
#define IA64_PSR_DD (__IA64_UL(1) << IA64_PSR_DD_BIT)
#define IA64_PSR_SS (__IA64_UL(1) << IA64_PSR_SS_BIT)
#define IA64_PSR_RI (__IA64_UL(3) << IA64_PSR_RI_BIT)
#define IA64_PSR_ED (__IA64_UL(1) << IA64_PSR_ED_BIT)
#define IA64_PSR_BN (__IA64_UL(1) << IA64_PSR_BN_BIT)
#define IA64_PSR_IA (__IA64_UL(1) << IA64_PSR_IA_BIT)
/* User mask bits: */
#define IA64_PSR_UM (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)
/* Default Control Register */
#define IA64_DCR_PP_BIT 0 /* privileged performance monitor default */
#define IA64_DCR_BE_BIT 1 /* big-endian default */
#define IA64_DCR_LC_BIT 2 /* ia32 lock-check enable */
#define IA64_DCR_DM_BIT 8 /* defer TLB miss faults */
#define IA64_DCR_DP_BIT 9 /* defer page-not-present faults */
#define IA64_DCR_DK_BIT 10 /* defer key miss faults */
#define IA64_DCR_DX_BIT 11 /* defer key permission faults */
#define IA64_DCR_DR_BIT 12 /* defer access right faults */
#define IA64_DCR_DA_BIT 13 /* defer access bit faults */
#define IA64_DCR_DD_BIT 14 /* defer debug faults */
#define IA64_DCR_PP (__IA64_UL(1) << IA64_DCR_PP_BIT)
#define IA64_DCR_BE (__IA64_UL(1) << IA64_DCR_BE_BIT)
#define IA64_DCR_LC (__IA64_UL(1) << IA64_DCR_LC_BIT)
#define IA64_DCR_DM (__IA64_UL(1) << IA64_DCR_DM_BIT)
#define IA64_DCR_DP (__IA64_UL(1) << IA64_DCR_DP_BIT)
#define IA64_DCR_DK (__IA64_UL(1) << IA64_DCR_DK_BIT)
#define IA64_DCR_DX (__IA64_UL(1) << IA64_DCR_DX_BIT)
#define IA64_DCR_DR (__IA64_UL(1) << IA64_DCR_DR_BIT)
#define IA64_DCR_DA (__IA64_UL(1) << IA64_DCR_DA_BIT)
#define IA64_DCR_DD (__IA64_UL(1) << IA64_DCR_DD_BIT)
/* Interrupt Status Register */
#define IA64_ISR_X_BIT 32 /* execute access */
#define IA64_ISR_W_BIT 33 /* write access */
#define IA64_ISR_R_BIT 34 /* read access */
#define IA64_ISR_NA_BIT 35 /* non-access */
#define IA64_ISR_SP_BIT 36 /* speculative load exception */
#define IA64_ISR_RS_BIT 37 /* mandatory register-stack exception */
#define IA64_ISR_IR_BIT 38 /* invalid register frame exception */
#define IA64_ISR_CODE_MASK 0xf
#define IA64_ISR_X (__IA64_UL(1) << IA64_ISR_X_BIT)
#define IA64_ISR_W (__IA64_UL(1) << IA64_ISR_W_BIT)
#define IA64_ISR_R (__IA64_UL(1) << IA64_ISR_R_BIT)
#define IA64_ISR_NA (__IA64_UL(1) << IA64_ISR_NA_BIT)
#define IA64_ISR_SP (__IA64_UL(1) << IA64_ISR_SP_BIT)
#define IA64_ISR_RS (__IA64_UL(1) << IA64_ISR_RS_BIT)
#define IA64_ISR_IR (__IA64_UL(1) << IA64_ISR_IR_BIT)
/* ISR code field for non-access instructions */
#define IA64_ISR_CODE_TPA 0
#define IA64_ISR_CODE_FC 1
#define IA64_ISR_CODE_PROBE 2
#define IA64_ISR_CODE_TAK 3
#define IA64_ISR_CODE_LFETCH 4
#define IA64_ISR_CODE_PROBEF 5
#endif /* _ASM_IA64_KREGS_H */

View File

@@ -0,0 +1,6 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
#define asmlinkage CPP_ASMLINKAGE __attribute__((syscall_linkage))
#endif

View File

@@ -0,0 +1,50 @@
#ifndef _ASM_IA64_LOCAL_H
#define _ASM_IA64_LOCAL_H
/*
* Copyright (C) 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/percpu.h>
typedef struct {
atomic64_t val;
} local_t;
#define LOCAL_INIT(i) ((local_t) { { (i) } })
#define local_read(l) atomic64_read(&(l)->val)
#define local_set(l, i) atomic64_set(&(l)->val, i)
#define local_inc(l) atomic64_inc(&(l)->val)
#define local_dec(l) atomic64_dec(&(l)->val)
#define local_add(i, l) atomic64_add((i), &(l)->val)
#define local_sub(i, l) atomic64_sub((i), &(l)->val)
/* Non-atomic variants, i.e., preemption disabled and won't be touched in interrupt, etc. */
#define __local_inc(l) (++(l)->val.counter)
#define __local_dec(l) (--(l)->val.counter)
#define __local_add(i,l) ((l)->val.counter += (i))
#define __local_sub(i,l) ((l)->val.counter -= (i))
/*
* Use these for per-cpu local_t variables. Note they take a variable (eg. mystruct.foo),
* not an address.
*/
#define cpu_local_read(v) local_read(&__ia64_per_cpu_var(v))
#define cpu_local_set(v, i) local_set(&__ia64_per_cpu_var(v), (i))
#define cpu_local_inc(v) local_inc(&__ia64_per_cpu_var(v))
#define cpu_local_dec(v) local_dec(&__ia64_per_cpu_var(v))
#define cpu_local_add(i, v) local_add((i), &__ia64_per_cpu_var(v))
#define cpu_local_sub(i, v) local_sub((i), &__ia64_per_cpu_var(v))
/*
* Non-atomic increments, i.e., preemption disabled and won't be touched in interrupt,
* etc.
*/
#define __cpu_local_inc(v) __local_inc(&__ia64_per_cpu_var(v))
#define __cpu_local_dec(v) __local_dec(&__ia64_per_cpu_var(v))
#define __cpu_local_add(i, v) __local_add((i), &__ia64_per_cpu_var(v))
#define __cpu_local_sub(i, v) __local_sub((i), &__ia64_per_cpu_var(v))
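/*
 * Hypothetical usage sketch (the names below are examples, not part of this
 * header); the cpu_local_*() callers are assumed to run with preemption
 * disabled so the per-cpu variable stays on the executing CPU:
 *
 *	static DEFINE_PER_CPU(local_t, rx_packets) = LOCAL_INIT(0);
 *
 *	static void count_rx_packet (void)
 *	{
 *		cpu_local_inc(rx_packets);		// atomic on the local CPU only
 *	}
 *
 *	static unsigned long total_rx_packets (void)
 *	{
 *		unsigned long sum = 0;
 *		int cpu;
 *
 *		for_each_online_cpu(cpu)
 *			sum += local_read(&per_cpu(rx_packets, cpu));
 *		return sum;
 *	}
 */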
#endif /* _ASM_IA64_LOCAL_H */

View File

@@ -0,0 +1,379 @@
/*
* Machine vector for IA-64.
*
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
* Copyright (C) Vijay Chander <vijay@engr.sgi.com>
* Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co.
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#ifndef _ASM_IA64_MACHVEC_H
#define _ASM_IA64_MACHVEC_H
#include <linux/config.h>
#include <linux/types.h>
/* forward declarations: */
struct device;
struct pt_regs;
struct scatterlist;
struct irq_desc;
struct page;
struct mm_struct;
typedef void ia64_mv_setup_t (char **);
typedef void ia64_mv_cpu_init_t (void);
typedef void ia64_mv_irq_init_t (void);
typedef void ia64_mv_send_ipi_t (int, int, int, int);
typedef void ia64_mv_timer_interrupt_t (int, void *, struct pt_regs *);
typedef void ia64_mv_global_tlb_purge_t (unsigned long, unsigned long, unsigned long);
typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
typedef struct irq_desc *ia64_mv_irq_desc (unsigned int);
typedef u8 ia64_mv_irq_to_vector (unsigned int);
typedef unsigned int ia64_mv_local_vector_to_irq (u8);
/* DMA-mapping interface: */
typedef void ia64_mv_dma_init (void);
typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, int);
typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
typedef int ia64_mv_dma_supported (struct device *, u64);
/*
* WARNING: The legacy I/O space is _architected_. Platforms are
* expected to follow this architected model (see Section 10.7 in the
* IA-64 Architecture Software Developer's Manual). Unfortunately,
* some broken machines do not follow that model, which is why we have
* to make the inX/outX operations part of the machine vector.
* Platform designers should follow the architected model whenever
* possible.
*/
typedef unsigned int ia64_mv_inb_t (unsigned long);
typedef unsigned int ia64_mv_inw_t (unsigned long);
typedef unsigned int ia64_mv_inl_t (unsigned long);
typedef void ia64_mv_outb_t (unsigned char, unsigned long);
typedef void ia64_mv_outw_t (unsigned short, unsigned long);
typedef void ia64_mv_outl_t (unsigned int, unsigned long);
typedef void ia64_mv_mmiowb_t (void);
typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
static inline void
machvec_noop (void)
{
}
static inline void
machvec_noop_mm (struct mm_struct *mm)
{
}
extern void machvec_setup (char **);
extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
extern void machvec_tlb_migrate_finish (struct mm_struct *);
# if defined (CONFIG_IA64_HP_SIM)
# include <asm/machvec_hpsim.h>
# elif defined (CONFIG_IA64_DIG)
# include <asm/machvec_dig.h>
# elif defined (CONFIG_IA64_HP_ZX1)
# include <asm/machvec_hpzx1.h>
# elif defined (CONFIG_IA64_SGI_SN2)
# include <asm/machvec_sn2.h>
# elif defined (CONFIG_IA64_GENERIC)
# ifdef MACHVEC_PLATFORM_HEADER
# include MACHVEC_PLATFORM_HEADER
# else
# define platform_name ia64_mv.name
# define platform_setup ia64_mv.setup
# define platform_cpu_init ia64_mv.cpu_init
# define platform_irq_init ia64_mv.irq_init
# define platform_send_ipi ia64_mv.send_ipi
# define platform_timer_interrupt ia64_mv.timer_interrupt
# define platform_global_tlb_purge ia64_mv.global_tlb_purge
# define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish
# define platform_dma_init ia64_mv.dma_init
# define platform_dma_alloc_coherent ia64_mv.dma_alloc_coherent
# define platform_dma_free_coherent ia64_mv.dma_free_coherent
# define platform_dma_map_single ia64_mv.dma_map_single
# define platform_dma_unmap_single ia64_mv.dma_unmap_single
# define platform_dma_map_sg ia64_mv.dma_map_sg
# define platform_dma_unmap_sg ia64_mv.dma_unmap_sg
# define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu
# define platform_dma_sync_sg_for_cpu ia64_mv.dma_sync_sg_for_cpu
# define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device
# define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device
# define platform_dma_mapping_error ia64_mv.dma_mapping_error
# define platform_dma_supported ia64_mv.dma_supported
# define platform_irq_desc ia64_mv.irq_desc
# define platform_irq_to_vector ia64_mv.irq_to_vector
# define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
# define platform_inb ia64_mv.inb
# define platform_inw ia64_mv.inw
# define platform_inl ia64_mv.inl
# define platform_outb ia64_mv.outb
# define platform_outw ia64_mv.outw
# define platform_outl ia64_mv.outl
# define platform_mmiowb ia64_mv.mmiowb
# define platform_readb ia64_mv.readb
# define platform_readw ia64_mv.readw
# define platform_readl ia64_mv.readl
# define platform_readq ia64_mv.readq
# define platform_readb_relaxed ia64_mv.readb_relaxed
# define platform_readw_relaxed ia64_mv.readw_relaxed
# define platform_readl_relaxed ia64_mv.readl_relaxed
# define platform_readq_relaxed ia64_mv.readq_relaxed
# endif
/* __attribute__((__aligned__(16))) is required to make the size of the
 * structure a multiple of 16 bytes.
 * This fills up the holes created because of section 3.3.1 in the
 * Software Conventions guide.
 */
struct ia64_machine_vector {
const char *name;
ia64_mv_setup_t *setup;
ia64_mv_cpu_init_t *cpu_init;
ia64_mv_irq_init_t *irq_init;
ia64_mv_send_ipi_t *send_ipi;
ia64_mv_timer_interrupt_t *timer_interrupt;
ia64_mv_global_tlb_purge_t *global_tlb_purge;
ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
ia64_mv_dma_init *dma_init;
ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
ia64_mv_dma_free_coherent *dma_free_coherent;
ia64_mv_dma_map_single *dma_map_single;
ia64_mv_dma_unmap_single *dma_unmap_single;
ia64_mv_dma_map_sg *dma_map_sg;
ia64_mv_dma_unmap_sg *dma_unmap_sg;
ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
ia64_mv_dma_mapping_error *dma_mapping_error;
ia64_mv_dma_supported *dma_supported;
ia64_mv_irq_desc *irq_desc;
ia64_mv_irq_to_vector *irq_to_vector;
ia64_mv_local_vector_to_irq *local_vector_to_irq;
ia64_mv_inb_t *inb;
ia64_mv_inw_t *inw;
ia64_mv_inl_t *inl;
ia64_mv_outb_t *outb;
ia64_mv_outw_t *outw;
ia64_mv_outl_t *outl;
ia64_mv_mmiowb_t *mmiowb;
ia64_mv_readb_t *readb;
ia64_mv_readw_t *readw;
ia64_mv_readl_t *readl;
ia64_mv_readq_t *readq;
ia64_mv_readb_relaxed_t *readb_relaxed;
ia64_mv_readw_relaxed_t *readw_relaxed;
ia64_mv_readl_relaxed_t *readl_relaxed;
ia64_mv_readq_relaxed_t *readq_relaxed;
} __attribute__((__aligned__(16))); /* align attrib? see above comment */
#define MACHVEC_INIT(name) \
{ \
#name, \
platform_setup, \
platform_cpu_init, \
platform_irq_init, \
platform_send_ipi, \
platform_timer_interrupt, \
platform_global_tlb_purge, \
platform_tlb_migrate_finish, \
platform_dma_init, \
platform_dma_alloc_coherent, \
platform_dma_free_coherent, \
platform_dma_map_single, \
platform_dma_unmap_single, \
platform_dma_map_sg, \
platform_dma_unmap_sg, \
platform_dma_sync_single_for_cpu, \
platform_dma_sync_sg_for_cpu, \
platform_dma_sync_single_for_device, \
platform_dma_sync_sg_for_device, \
platform_dma_mapping_error, \
platform_dma_supported, \
platform_irq_desc, \
platform_irq_to_vector, \
platform_local_vector_to_irq, \
platform_inb, \
platform_inw, \
platform_inl, \
platform_outb, \
platform_outw, \
platform_outl, \
platform_mmiowb, \
platform_readb, \
platform_readw, \
platform_readl, \
platform_readq, \
platform_readb_relaxed, \
platform_readw_relaxed, \
platform_readl_relaxed, \
platform_readq_relaxed, \
}
extern struct ia64_machine_vector ia64_mv;
extern void machvec_init (const char *name);
# else
# error Unknown configuration. Update asm-ia64/machvec.h.
# endif /* CONFIG_IA64_GENERIC */
/*
* Declare default routines which aren't declared anywhere else:
*/
extern ia64_mv_dma_init swiotlb_init;
extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent;
extern ia64_mv_dma_free_coherent swiotlb_free_coherent;
extern ia64_mv_dma_map_single swiotlb_map_single;
extern ia64_mv_dma_unmap_single swiotlb_unmap_single;
extern ia64_mv_dma_map_sg swiotlb_map_sg;
extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg;
extern ia64_mv_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu;
extern ia64_mv_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu;
extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
extern ia64_mv_dma_sync_sg_for_device swiotlb_sync_sg_for_device;
extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error;
extern ia64_mv_dma_supported swiotlb_dma_supported;
/*
* Define default versions so we can extend machvec for new platforms without having
* to update the machvec files for all existing platforms.
*/
#ifndef platform_setup
# define platform_setup machvec_setup
#endif
#ifndef platform_cpu_init
# define platform_cpu_init machvec_noop
#endif
#ifndef platform_irq_init
# define platform_irq_init machvec_noop
#endif
#ifndef platform_send_ipi
# define platform_send_ipi ia64_send_ipi /* default to architected version */
#endif
#ifndef platform_timer_interrupt
# define platform_timer_interrupt machvec_timer_interrupt
#endif
#ifndef platform_global_tlb_purge
# define platform_global_tlb_purge ia64_global_tlb_purge /* default to architected version */
#endif
#ifndef platform_tlb_migrate_finish
# define platform_tlb_migrate_finish machvec_noop_mm
#endif
#ifndef platform_dma_init
# define platform_dma_init swiotlb_init
#endif
#ifndef platform_dma_alloc_coherent
# define platform_dma_alloc_coherent swiotlb_alloc_coherent
#endif
#ifndef platform_dma_free_coherent
# define platform_dma_free_coherent swiotlb_free_coherent
#endif
#ifndef platform_dma_map_single
# define platform_dma_map_single swiotlb_map_single
#endif
#ifndef platform_dma_unmap_single
# define platform_dma_unmap_single swiotlb_unmap_single
#endif
#ifndef platform_dma_map_sg
# define platform_dma_map_sg swiotlb_map_sg
#endif
#ifndef platform_dma_unmap_sg
# define platform_dma_unmap_sg swiotlb_unmap_sg
#endif
#ifndef platform_dma_sync_single_for_cpu
# define platform_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu
#endif
#ifndef platform_dma_sync_sg_for_cpu
# define platform_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu
#endif
#ifndef platform_dma_sync_single_for_device
# define platform_dma_sync_single_for_device swiotlb_sync_single_for_device
#endif
#ifndef platform_dma_sync_sg_for_device
# define platform_dma_sync_sg_for_device swiotlb_sync_sg_for_device
#endif
#ifndef platform_dma_mapping_error
# define platform_dma_mapping_error swiotlb_dma_mapping_error
#endif
#ifndef platform_dma_supported
# define platform_dma_supported swiotlb_dma_supported
#endif
#ifndef platform_irq_desc
# define platform_irq_desc __ia64_irq_desc
#endif
#ifndef platform_irq_to_vector
# define platform_irq_to_vector __ia64_irq_to_vector
#endif
#ifndef platform_local_vector_to_irq
# define platform_local_vector_to_irq __ia64_local_vector_to_irq
#endif
#ifndef platform_inb
# define platform_inb __ia64_inb
#endif
#ifndef platform_inw
# define platform_inw __ia64_inw
#endif
#ifndef platform_inl
# define platform_inl __ia64_inl
#endif
#ifndef platform_outb
# define platform_outb __ia64_outb
#endif
#ifndef platform_outw
# define platform_outw __ia64_outw
#endif
#ifndef platform_outl
# define platform_outl __ia64_outl
#endif
#ifndef platform_mmiowb
# define platform_mmiowb __ia64_mmiowb
#endif
#ifndef platform_readb
# define platform_readb __ia64_readb
#endif
#ifndef platform_readw
# define platform_readw __ia64_readw
#endif
#ifndef platform_readl
# define platform_readl __ia64_readl
#endif
#ifndef platform_readq
# define platform_readq __ia64_readq
#endif
#ifndef platform_readb_relaxed
# define platform_readb_relaxed __ia64_readb_relaxed
#endif
#ifndef platform_readw_relaxed
# define platform_readw_relaxed __ia64_readw_relaxed
#endif
#ifndef platform_readl_relaxed
# define platform_readl_relaxed __ia64_readl_relaxed
#endif
#ifndef platform_readq_relaxed
# define platform_readq_relaxed __ia64_readq_relaxed
#endif
#endif /* _ASM_IA64_MACHVEC_H */

View File

@@ -0,0 +1,18 @@
#ifndef _ASM_IA64_MACHVEC_DIG_h
#define _ASM_IA64_MACHVEC_DIG_h
extern ia64_mv_setup_t dig_setup;
extern ia64_mv_irq_init_t dig_irq_init;
/*
* This stuff has dual use!
*
* For a generic kernel, the macros are used to initialize the
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
#define platform_name "dig"
#define platform_setup dig_setup
#define platform_irq_init dig_irq_init
#endif /* _ASM_IA64_MACHVEC_DIG_h */

View File

@@ -0,0 +1,18 @@
#ifndef _ASM_IA64_MACHVEC_HPSIM_h
#define _ASM_IA64_MACHVEC_HPSIM_h
extern ia64_mv_setup_t hpsim_setup;
extern ia64_mv_irq_init_t hpsim_irq_init;
/*
* This stuff has dual use!
*
* For a generic kernel, the macros are used to initialize the
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
#define platform_name "hpsim"
#define platform_setup hpsim_setup
#define platform_irq_init hpsim_irq_init
#endif /* _ASM_IA64_MACHVEC_HPSIM_h */

View File

@@ -0,0 +1,38 @@
#ifndef _ASM_IA64_MACHVEC_HPZX1_h
#define _ASM_IA64_MACHVEC_HPZX1_h
extern ia64_mv_setup_t dig_setup;
extern ia64_mv_setup_t sba_setup;
extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
extern ia64_mv_dma_free_coherent sba_free_coherent;
extern ia64_mv_dma_map_single sba_map_single;
extern ia64_mv_dma_unmap_single sba_unmap_single;
extern ia64_mv_dma_map_sg sba_map_sg;
extern ia64_mv_dma_unmap_sg sba_unmap_sg;
extern ia64_mv_dma_supported sba_dma_supported;
extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
/*
* This stuff has dual use!
*
* For a generic kernel, the macros are used to initialize the
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
#define platform_name "hpzx1"
#define platform_setup sba_setup
#define platform_dma_init machvec_noop
#define platform_dma_alloc_coherent sba_alloc_coherent
#define platform_dma_free_coherent sba_free_coherent
#define platform_dma_map_single sba_map_single
#define platform_dma_unmap_single sba_unmap_single
#define platform_dma_map_sg sba_map_sg
#define platform_dma_unmap_sg sba_unmap_sg
#define platform_dma_sync_single_for_cpu machvec_dma_sync_single
#define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg
#define platform_dma_sync_single_for_device machvec_dma_sync_single
#define platform_dma_sync_sg_for_device machvec_dma_sync_sg
#define platform_dma_supported sba_dma_supported
#define platform_dma_mapping_error sba_dma_mapping_error
#endif /* _ASM_IA64_MACHVEC_HPZX1_h */

View File

@@ -0,0 +1,31 @@
#include <asm/machvec.h>
extern ia64_mv_send_ipi_t ia64_send_ipi;
extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
extern ia64_mv_irq_desc __ia64_irq_desc;
extern ia64_mv_irq_to_vector __ia64_irq_to_vector;
extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
extern ia64_mv_inb_t __ia64_inb;
extern ia64_mv_inw_t __ia64_inw;
extern ia64_mv_inl_t __ia64_inl;
extern ia64_mv_outb_t __ia64_outb;
extern ia64_mv_outw_t __ia64_outw;
extern ia64_mv_outl_t __ia64_outl;
extern ia64_mv_mmiowb_t __ia64_mmiowb;
extern ia64_mv_readb_t __ia64_readb;
extern ia64_mv_readw_t __ia64_readw;
extern ia64_mv_readl_t __ia64_readl;
extern ia64_mv_readq_t __ia64_readq;
extern ia64_mv_readb_t __ia64_readb_relaxed;
extern ia64_mv_readw_t __ia64_readw_relaxed;
extern ia64_mv_readl_t __ia64_readl_relaxed;
extern ia64_mv_readq_t __ia64_readq_relaxed;
#define MACHVEC_HELPER(name) \
struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \
= MACHVEC_INIT(name);
#define MACHVEC_DEFINE(name) MACHVEC_HELPER(name)
MACHVEC_DEFINE(MACHVEC_PLATFORM_NAME)
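/*
 * Illustrative note: for a CONFIG_IA64_GENERIC kernel, each supported platform is
 * expected to provide a tiny stub that defines its name and header before including
 * this file, roughly along these lines (the stub file name is hypothetical):
 *
 *	// machvec_hpzx1_stub.c
 *	#define MACHVEC_PLATFORM_NAME		hpzx1
 *	#define MACHVEC_PLATFORM_HEADER		<asm/machvec_hpzx1.h>
 *	#include <asm/machvec_init.h>
 *
 * This places a fully initialized "struct ia64_machine_vector machvec_hpzx1" into
 * the .machvec section, where machvec_init() can look it up by name at boot.
 */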

View File

@@ -0,0 +1,124 @@
/*
* Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
#ifndef _ASM_IA64_MACHVEC_SN2_H
#define _ASM_IA64_MACHVEC_SN2_H
extern ia64_mv_setup_t sn_setup;
extern ia64_mv_cpu_init_t sn_cpu_init;
extern ia64_mv_irq_init_t sn_irq_init;
extern ia64_mv_send_ipi_t sn2_send_IPI;
extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
extern ia64_mv_tlb_migrate_finish_t sn_tlb_migrate_finish;
extern ia64_mv_irq_desc sn_irq_desc;
extern ia64_mv_irq_to_vector sn_irq_to_vector;
extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
extern ia64_mv_inb_t __sn_inb;
extern ia64_mv_inw_t __sn_inw;
extern ia64_mv_inl_t __sn_inl;
extern ia64_mv_outb_t __sn_outb;
extern ia64_mv_outw_t __sn_outw;
extern ia64_mv_outl_t __sn_outl;
extern ia64_mv_mmiowb_t __sn_mmiowb;
extern ia64_mv_readb_t __sn_readb;
extern ia64_mv_readw_t __sn_readw;
extern ia64_mv_readl_t __sn_readl;
extern ia64_mv_readq_t __sn_readq;
extern ia64_mv_readb_t __sn_readb_relaxed;
extern ia64_mv_readw_t __sn_readw_relaxed;
extern ia64_mv_readl_t __sn_readl_relaxed;
extern ia64_mv_readq_t __sn_readq_relaxed;
extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent;
extern ia64_mv_dma_free_coherent sn_dma_free_coherent;
extern ia64_mv_dma_map_single sn_dma_map_single;
extern ia64_mv_dma_unmap_single sn_dma_unmap_single;
extern ia64_mv_dma_map_sg sn_dma_map_sg;
extern ia64_mv_dma_unmap_sg sn_dma_unmap_sg;
extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu;
extern ia64_mv_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu;
extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device;
extern ia64_mv_dma_mapping_error sn_dma_mapping_error;
extern ia64_mv_dma_supported sn_dma_supported;
/*
* This stuff has dual use!
*
* For a generic kernel, the macros are used to initialize the
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
#define platform_name "sn2"
#define platform_setup sn_setup
#define platform_cpu_init sn_cpu_init
#define platform_irq_init sn_irq_init
#define platform_send_ipi sn2_send_IPI
#define platform_timer_interrupt sn_timer_interrupt
#define platform_global_tlb_purge sn2_global_tlb_purge
#define platform_tlb_migrate_finish sn_tlb_migrate_finish
#define platform_pci_fixup sn_pci_fixup
#define platform_inb __sn_inb
#define platform_inw __sn_inw
#define platform_inl __sn_inl
#define platform_outb __sn_outb
#define platform_outw __sn_outw
#define platform_outl __sn_outl
#define platform_mmiowb __sn_mmiowb
#define platform_readb __sn_readb
#define platform_readw __sn_readw
#define platform_readl __sn_readl
#define platform_readq __sn_readq
#define platform_readb_relaxed __sn_readb_relaxed
#define platform_readw_relaxed __sn_readw_relaxed
#define platform_readl_relaxed __sn_readl_relaxed
#define platform_readq_relaxed __sn_readq_relaxed
#define platform_irq_desc sn_irq_desc
#define platform_irq_to_vector sn_irq_to_vector
#define platform_local_vector_to_irq sn_local_vector_to_irq
#define platform_dma_init machvec_noop
#define platform_dma_alloc_coherent sn_dma_alloc_coherent
#define platform_dma_free_coherent sn_dma_free_coherent
#define platform_dma_map_single sn_dma_map_single
#define platform_dma_unmap_single sn_dma_unmap_single
#define platform_dma_map_sg sn_dma_map_sg
#define platform_dma_unmap_sg sn_dma_unmap_sg
#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
#define platform_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu
#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
#define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device
#define platform_dma_mapping_error sn_dma_mapping_error
#define platform_dma_supported sn_dma_supported
#include <asm/sn/io.h>
#endif /* _ASM_IA64_MACHVEC_SN2_H */

View File

@@ -0,0 +1,10 @@
#ifndef _ASM_IA64_MC146818RTC_H
#define _ASM_IA64_MC146818RTC_H
/*
* Machine dependent access functions for RTC registers.
*/
/* empty include file to satisfy the include in genrtc.c */
#endif /* _ASM_IA64_MC146818RTC_H */

View File

@@ -0,0 +1,126 @@
/*
* File: mca.h
* Purpose: Machine check handling specific defines
*
* Copyright (C) 1999, 2004 Silicon Graphics, Inc.
* Copyright (C) Vijay Chander (vijay@engr.sgi.com)
* Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
*/
#ifndef _ASM_IA64_MCA_H
#define _ASM_IA64_MCA_H
#if !defined(__ASSEMBLY__)
#include <linux/interrupt.h>
#include <linux/types.h>
#include <asm/param.h>
#include <asm/sal.h>
#include <asm/processor.h>
#include <asm/mca_asm.h>
#define IA64_MCA_RENDEZ_TIMEOUT (20 * 1000) /* value in milliseconds - 20 seconds */
typedef struct ia64_fptr {
unsigned long fp;
unsigned long gp;
} ia64_fptr_t;
typedef union cmcv_reg_u {
u64 cmcv_regval;
struct {
u64 cmcr_vector : 8;
u64 cmcr_reserved1 : 4;
u64 cmcr_ignored1 : 1;
u64 cmcr_reserved2 : 3;
u64 cmcr_mask : 1;
u64 cmcr_ignored2 : 47;
} cmcv_reg_s;
} cmcv_reg_t;
#define cmcv_mask cmcv_reg_s.cmcr_mask
#define cmcv_vector cmcv_reg_s.cmcr_vector
enum {
IA64_MCA_RENDEZ_CHECKIN_NOTDONE = 0x0,
IA64_MCA_RENDEZ_CHECKIN_DONE = 0x1
};
/* the following data structure is used for TLB error recovery purposes */
extern struct ia64_mca_tlb_info {
u64 cr_lid;
u64 percpu_paddr;
u64 ptce_base;
u32 ptce_count[2];
u32 ptce_stride[2];
u64 pal_paddr;
u64 pal_base;
} ia64_mca_tlb_list[NR_CPUS];
/* Information maintained by the MC infrastructure */
typedef struct ia64_mc_info_s {
u64 imi_mca_handler;
size_t imi_mca_handler_size;
u64 imi_monarch_init_handler;
size_t imi_monarch_init_handler_size;
u64 imi_slave_init_handler;
size_t imi_slave_init_handler_size;
u8 imi_rendez_checkin[NR_CPUS];
} ia64_mc_info_t;
typedef struct ia64_mca_sal_to_os_state_s {
u64 imsto_os_gp; /* GP of the os registered with the SAL */
u64 imsto_pal_proc; /* PAL_PROC entry point - physical addr */
u64 imsto_sal_proc; /* SAL_PROC entry point - physical addr */
u64 imsto_sal_gp; /* GP of the SAL - physical */
u64 imsto_rendez_state; /* Rendez state information */
u64 imsto_sal_check_ra; /* Return address in SAL_CHECK while going
* back to SAL from OS after MCA handling.
*/
u64 pal_min_state; /* from PAL in r17 */
u64 proc_state_param; /* from PAL in r18. See SDV 2:268 11.3.2.1 */
} ia64_mca_sal_to_os_state_t;
enum {
IA64_MCA_CORRECTED = 0x0, /* Error has been corrected by OS_MCA */
IA64_MCA_WARM_BOOT = -1, /* Warm boot of the system needed from SAL */
IA64_MCA_COLD_BOOT = -2, /* Cold boot of the system needed from SAL */
IA64_MCA_HALT = -3 /* System to be halted by SAL */
};
enum {
IA64_MCA_SAME_CONTEXT = 0x0, /* SAL to return to same context */
IA64_MCA_NEW_CONTEXT = -1 /* SAL to return to new context */
};
typedef struct ia64_mca_os_to_sal_state_s {
u64 imots_os_status; /* OS status to SAL as to what happened
* with the MCA handling.
*/
u64 imots_sal_gp; /* GP of the SAL - physical */
u64 imots_context; /* 0 if return to same context
1 if return to new context */
u64 *imots_new_min_state; /* Pointer to structure containing
* new values of registers in the min state
* save area.
*/
u64 imots_sal_check_ra; /* Return address in SAL_CHECK while going
* back to SAL from OS after MCA handling.
*/
} ia64_mca_os_to_sal_state_t;
extern void ia64_mca_init(void);
extern void ia64_os_mca_dispatch(void);
extern void ia64_os_mca_dispatch_end(void);
extern void ia64_mca_ucmc_handler(void);
extern void ia64_monarch_init_handler(void);
extern void ia64_slave_init_handler(void);
extern void ia64_mca_cmc_vector_setup(void);
extern int ia64_reg_MCA_extension(void*);
extern void ia64_unreg_MCA_extension(void);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_MCA_H */

View File

@@ -0,0 +1,308 @@
/*
* File: mca_asm.h
*
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) Vijay Chander (vijay@engr.sgi.com)
* Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
* Copyright (C) 2000 Hewlett-Packard Co.
* Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 2002 Intel Corp.
* Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
*/
#ifndef _ASM_IA64_MCA_ASM_H
#define _ASM_IA64_MCA_ASM_H
#define PSR_IC 13
#define PSR_I 14
#define PSR_DT 17
#define PSR_RT 27
#define PSR_MC 35
#define PSR_IT 36
#define PSR_BN 44
/*
* This macro converts an instruction virtual address to a physical address
* Right now for simulation purposes the virtual addresses are
* direct mapped to physical addresses.
* 1. Lop off bits 61 thru 63 in the virtual address
*/
#define INST_VA_TO_PA(addr) \
dep addr = 0, addr, 61, 3
/*
* This macro converts a data virtual address to a physical address
* Right now for simulation purposes the virtual addresses are
* direct mapped to physical addresses.
* 1. Lop off bits 61 thru 63 in the virtual address
*/
#define DATA_VA_TO_PA(addr) \
tpa addr = addr
/*
* This macro converts a data physical address to a virtual address
* Right now for simulation purposes the virtual addresses are
* direct mapped to physical addresses.
* 1. Put 0x7 in bits 61 thru 63.
*/
#define DATA_PA_TO_VA(addr,temp) \
mov temp = 0x7 ;; \
dep addr = temp, addr, 61, 3
/*
* This macro jumps to the instruction at the given virtual address
* and starts execution in physical mode with all the address
* translations turned off.
* 1. Save the current psr
* 2. Make sure that all the upper 32 bits are off
*
* 3. Clear the interrupt enable and interrupt state collection bits
* in the psr before updating the ipsr and iip.
*
* 4. Turn off the instruction, data and rse translation bits of the psr
* and store the new value into ipsr
* Also make sure that the interrupts are disabled.
* Ensure that we are in little endian mode.
* [psr.{rt, it, dt, i, be} = 0]
*
* 5. Get the physical address corresponding to the virtual address
* of the next instruction bundle and put it in iip.
* (Using magic numbers 24 and 40 in the deposit instruction since
* the IA64_SDK code directly maps the lower 24 bits of a virtual
* address to the physical address).
*
* 6. Do an rfi to move the values from ipsr to psr and iip to ip.
*/
#define PHYSICAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
mov old_psr = psr; \
;; \
dep old_psr = 0, old_psr, 32, 32; \
\
mov ar.rsc = 0 ; \
;; \
srlz.d; \
mov temp2 = ar.bspstore; \
;; \
DATA_VA_TO_PA(temp2); \
;; \
mov temp1 = ar.rnat; \
;; \
mov ar.bspstore = temp2; \
;; \
mov ar.rnat = temp1; \
mov temp1 = psr; \
mov temp2 = psr; \
;; \
\
dep temp2 = 0, temp2, PSR_IC, 2; \
;; \
mov psr.l = temp2; \
;; \
srlz.d; \
dep temp1 = 0, temp1, 32, 32; \
;; \
dep temp1 = 0, temp1, PSR_IT, 1; \
;; \
dep temp1 = 0, temp1, PSR_DT, 1; \
;; \
dep temp1 = 0, temp1, PSR_RT, 1; \
;; \
dep temp1 = 0, temp1, PSR_I, 1; \
;; \
dep temp1 = 0, temp1, PSR_IC, 1; \
;; \
dep temp1 = -1, temp1, PSR_MC, 1; \
;; \
mov cr.ipsr = temp1; \
;; \
LOAD_PHYSICAL(p0, temp2, start_addr); \
;; \
mov cr.iip = temp2; \
mov cr.ifs = r0; \
DATA_VA_TO_PA(sp); \
DATA_VA_TO_PA(gp); \
;; \
srlz.i; \
;; \
nop 1; \
nop 2; \
nop 1; \
nop 2; \
rfi; \
;;
/*
* This macro jumps to the instruction at the given virtual address
* and starts execution in virtual mode with all the address
* translations turned on.
* 1. Get the old saved psr
*
* 2. Clear the interrupt state collection bit in the current psr.
*
* 3. Set the instruction translation bit back in the old psr
* Note we have to do this since we are right now saving only the
* lower 32 bits of old psr. (Also the old psr has the data and
* rse translation bits on.)
*
* 4. Set ipsr to this old_psr with "it" bit set and "bn" = 1.
*
* 5. Reset the current thread pointer (r13).
*
* 6. Set iip to the virtual address of the next instruction bundle.
*
* 7. Do an rfi to move ipsr to psr and iip to ip.
*/
#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
mov temp2 = psr; \
;; \
mov old_psr = temp2; \
;; \
dep temp2 = 0, temp2, PSR_IC, 2; \
;; \
mov psr.l = temp2; \
mov ar.rsc = 0; \
;; \
srlz.d; \
mov r13 = ar.k6; \
mov temp2 = ar.bspstore; \
;; \
DATA_PA_TO_VA(temp2,temp1); \
;; \
mov temp1 = ar.rnat; \
;; \
mov ar.bspstore = temp2; \
;; \
mov ar.rnat = temp1; \
;; \
mov temp1 = old_psr; \
;; \
mov temp2 = 1; \
;; \
dep temp1 = temp2, temp1, PSR_IC, 1; \
;; \
dep temp1 = temp2, temp1, PSR_IT, 1; \
;; \
dep temp1 = temp2, temp1, PSR_DT, 1; \
;; \
dep temp1 = temp2, temp1, PSR_RT, 1; \
;; \
dep temp1 = temp2, temp1, PSR_BN, 1; \
;; \
\
mov cr.ipsr = temp1; \
movl temp2 = start_addr; \
;; \
mov cr.iip = temp2; \
;; \
DATA_PA_TO_VA(sp, temp1); \
DATA_PA_TO_VA(gp, temp2); \
srlz.i; \
;; \
nop 1; \
nop 2; \
nop 1; \
rfi \
;;
/*
* The following offsets capture the order in which the
* RSE related registers from the old context are
* saved onto the new stack frame.
*
* +-----------------------+
* |NDIRTY [BSP - BSPSTORE]|
* +-----------------------+
* | RNAT |
* +-----------------------+
* | BSPSTORE |
* +-----------------------+
* | IFS |
* +-----------------------+
* | PFS |
* +-----------------------+
* | RSC |
* +-----------------------+ <-------- Bottom of new stack frame
*/
#define rse_rsc_offset 0
#define rse_pfs_offset (rse_rsc_offset+0x08)
#define rse_ifs_offset (rse_pfs_offset+0x08)
#define rse_bspstore_offset (rse_ifs_offset+0x08)
#define rse_rnat_offset (rse_bspstore_offset+0x08)
#define rse_ndirty_offset (rse_rnat_offset+0x08)
/*
* rse_switch_context
*
* 1. Save old RSC onto the new stack frame
* 2. Save PFS onto new stack frame
* 3. Cover the old frame and start a new frame.
* 4. Save IFS onto new stack frame
* 5. Save the old BSPSTORE on the new stack frame
* 6. Save the old RNAT on the new stack frame
* 7. Write BSPSTORE with the new backing store pointer
* 8. Read and save the new BSP to calculate the #dirty registers
* NOTE: Look at pages 11-10, 11-11 in PRM Vol 2
*/
#define rse_switch_context(temp,p_stackframe,p_bspstore) \
;; \
mov temp=ar.rsc;; \
st8 [p_stackframe]=temp,8;; \
mov temp=ar.pfs;; \
st8 [p_stackframe]=temp,8; \
cover ;; \
mov temp=cr.ifs;; \
st8 [p_stackframe]=temp,8;; \
mov temp=ar.bspstore;; \
st8 [p_stackframe]=temp,8;; \
mov temp=ar.rnat;; \
st8 [p_stackframe]=temp,8; \
mov ar.bspstore=p_bspstore;; \
mov temp=ar.bsp;; \
sub temp=temp,p_bspstore;; \
st8 [p_stackframe]=temp,8;;
/*
* rse_return_context
* 1. Allocate a zero-sized frame
* 2. Store the number of dirty registers in the RSC.loadrs field
* 3. Issue a loadrs to ensure that any registers from the interrupted
* context which were saved on the new stack frame have been loaded
* back into the stacked registers
* 4. Restore BSPSTORE
* 5. Restore RNAT
* 6. Restore PFS
* 7. Restore IFS
* 8. Restore RSC
* 9. Issue an RFI
*/
#define rse_return_context(psr_mask_reg,temp,p_stackframe) \
;; \
alloc temp=ar.pfs,0,0,0,0; \
add p_stackframe=rse_ndirty_offset,p_stackframe;; \
ld8 temp=[p_stackframe];; \
shl temp=temp,16;; \
mov ar.rsc=temp;; \
loadrs;; \
add p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\
ld8 temp=[p_stackframe];; \
mov ar.bspstore=temp;; \
add p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\
ld8 temp=[p_stackframe];; \
mov ar.rnat=temp;; \
add p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;; \
ld8 temp=[p_stackframe];; \
mov ar.pfs=temp;; \
add p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;; \
ld8 temp=[p_stackframe];; \
mov cr.ifs=temp;; \
add p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;; \
ld8 temp=[p_stackframe];; \
mov ar.rsc=temp ; \
mov temp=psr;; \
or temp=temp,psr_mask_reg;; \
mov cr.ipsr=temp;; \
mov temp=ip;; \
add temp=0x30,temp;; \
mov cr.iip=temp;; \
srlz.i;; \
rfi;;
#endif /* _ASM_IA64_MCA_ASM_H */

View File

@@ -0,0 +1,60 @@
#ifndef meminit_h
#define meminit_h
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/config.h>
/*
* Entries defined so far:
* - boot param structure itself
* - memory map
* - initrd (optional)
* - command line string
* - kernel code & data
*
* More could be added if necessary
*/
#define IA64_MAX_RSVD_REGIONS 5
struct rsvd_region {
unsigned long start; /* virtual address of beginning of element */
unsigned long end; /* virtual address of end of element + 1 */
};
extern struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
extern int num_rsvd_regions;
extern void find_memory (void);
extern void reserve_memory (void);
extern void find_initrd (void);
extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
/*
* For rounding an address to the next IA64_GRANULE_SIZE or order
*/
#define GRANULEROUNDDOWN(n) ((n) & ~(IA64_GRANULE_SIZE-1))
#define GRANULEROUNDUP(n) (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
#define ORDERROUNDDOWN(n) ((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))
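/*
 * Worked example (a sketch, assuming the common 16MB granule, i.e.
 * IA64_GRANULE_SIZE = 0x1000000): GRANULEROUNDDOWN(0x1234567) = 0x1000000 and
 * GRANULEROUNDUP(0x1234567) = 0x2000000, i.e. addresses are snapped down/up
 * to the enclosing granule boundary.
 */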
#ifdef CONFIG_DISCONTIGMEM
extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
#else
# define call_pernode_memory(start, len, func) (*func)(start, len, 0)
#endif
#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
extern unsigned long vmalloc_end;
extern struct page *vmem_map;
extern int find_largest_hole (u64 start, u64 end, void *arg);
extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
#endif
#endif /* meminit_h */

View File

@@ -0,0 +1,51 @@
#ifndef _ASM_IA64_MMAN_H
#define _ASM_IA64_MMAN_H
/*
* Based on <asm-i386/mman.h>.
*
* Modified 1998-2000, 2002
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */
#define PROT_SEM 0x8 /* page may be used for atomic ops */
#define PROT_NONE 0x0 /* page can not be accessed */
#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
#define MAP_SHARED 0x01 /* Share changes */
#define MAP_PRIVATE 0x02 /* Changes are private */
#define MAP_TYPE 0x0f /* Mask for type of mapping */
#define MAP_FIXED 0x10 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20 /* don't use a file */
#define MAP_GROWSDOWN 0x00100 /* stack-like segment */
#define MAP_GROWSUP 0x00200 /* register stack-like segment */
#define MAP_DENYWRITE 0x00800 /* ETXTBSY */
#define MAP_EXECUTABLE 0x01000 /* mark it as an executable */
#define MAP_LOCKED 0x02000 /* pages are locked */
#define MAP_NORESERVE 0x04000 /* don't check for reservations */
#define MAP_POPULATE 0x08000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
#define MS_ASYNC 1 /* sync memory asynchronously */
#define MS_INVALIDATE 2 /* invalidate the caches */
#define MS_SYNC 4 /* synchronous memory sync */
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
#define MADV_NORMAL 0x0 /* default page-in behavior */
#define MADV_RANDOM 0x1 /* page-in minimum required */
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
#define MAP_FILE 0
#endif /* _ASM_IA64_MMAN_H */

View File

@@ -0,0 +1,11 @@
#ifndef __MMU_H
#define __MMU_H
/*
* Type for a context number. We declare it volatile to ensure proper ordering when it's
* accessed outside of spinlock'd critical sections (e.g., as done in activate_mm() and
* init_new_context()).
*/
typedef volatile unsigned long mm_context_t;
#endif

View File

@@ -0,0 +1,170 @@
#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H
/*
* Copyright (C) 1998-2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
/*
* Routines to manage the allocation of task context numbers. Task context numbers are
* used to reduce or eliminate the need to perform TLB flushes due to context switches.
* Context numbers are implemented using ia-64 region ids. Since the IA-64 TLB does not
* consider the region number when performing a TLB lookup, we need to assign a unique
* region id to each region in a process. We use the least significant three bits in a
* region id for this purpose.
*/
#define IA64_REGION_ID_KERNEL 0 /* the kernel's region id (tlb.c depends on this being 0) */
#define ia64_rid(ctx,addr) (((ctx) << 3) | (addr >> 61))
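/*
 * Worked example: for context number 5 and a region-2 address such as
 * 0x4000000000000000, ia64_rid(5, addr) = (5 << 3) | (addr >> 61) = 40 | 2 = 42,
 * so each of the eight 2^61-byte regions of a process gets its own region id.
 */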
# ifndef __ASSEMBLY__
#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/processor.h>
struct ia64_ctx {
spinlock_t lock;
unsigned int next; /* next context number to use */
unsigned int limit; /* next >= limit => must call wrap_mmu_context() */
unsigned int max_ctx; /* max. context value supported by all CPUs */
};
extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
extern void wrap_mmu_context (struct mm_struct *mm);
static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}
/*
* When the context counter wraps around all TLBs need to be flushed because an old
* context number might have been reused. This is signalled by the ia64_need_tlb_flush
* per-CPU variable, which is checked in the routine below. Called by activate_mm().
* <efocht@ess.nec.de>
*/
static inline void
delayed_tlb_flush (void)
{
extern void local_flush_tlb_all (void);
if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
local_flush_tlb_all();
__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
}
}
static inline mm_context_t
get_mmu_context (struct mm_struct *mm)
{
unsigned long flags;
mm_context_t context = mm->context;
if (context)
return context;
spin_lock_irqsave(&ia64_ctx.lock, flags);
{
/* re-check, now that we've got the lock: */
context = mm->context;
if (context == 0) {
cpus_clear(mm->cpu_vm_mask);
if (ia64_ctx.next >= ia64_ctx.limit)
wrap_mmu_context(mm);
mm->context = context = ia64_ctx.next++;
}
}
spin_unlock_irqrestore(&ia64_ctx.lock, flags);
return context;
}
/*
* Initialize context number to some sane value. MM is guaranteed to be a brand-new
* address-space, so no TLB flushing is needed, ever.
*/
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
mm->context = 0;
return 0;
}
static inline void
destroy_context (struct mm_struct *mm)
{
/* Nothing to do. */
}
static inline void
reload_context (mm_context_t context)
{
unsigned long rid;
unsigned long rid_incr = 0;
unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;
old_rr4 = ia64_get_rr(0x8000000000000000UL);
rid = context << 3; /* make space for encoding the region number */
rid_incr = 1 << 8;
/* encode the region id, preferred page size, and VHPT enable bit: */
rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
rr1 = rr0 + 1*rid_incr;
rr2 = rr0 + 2*rid_incr;
rr3 = rr0 + 3*rid_incr;
rr4 = rr0 + 4*rid_incr;
#ifdef CONFIG_HUGETLB_PAGE
rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);
#endif
ia64_set_rr(0x0000000000000000UL, rr0);
ia64_set_rr(0x2000000000000000UL, rr1);
ia64_set_rr(0x4000000000000000UL, rr2);
ia64_set_rr(0x6000000000000000UL, rr3);
ia64_set_rr(0x8000000000000000UL, rr4);
ia64_srlz_i(); /* srlz.i implies srlz.d */
}
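/*
 * Worked example of the region-register encoding above (a sketch, assuming
 * PAGE_SHIFT = 14, i.e. 16KB pages): for context 1, rid = 1 << 3 = 8 and
 *
 *	rr0 = (8 << 8) | (14 << 2) | 1 = 0x839
 *
 * i.e. bits 8-31 hold the region id, bits 2-7 the preferred page size and
 * bit 0 the VHPT-enable bit; rr1..rr4 then carry rid 9..12 for the remaining
 * user regions.
 */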
static inline void
activate_context (struct mm_struct *mm)
{
mm_context_t context;
do {
context = get_mmu_context(mm);
if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
cpu_set(smp_processor_id(), mm->cpu_vm_mask);
reload_context(context);
/* in the unlikely event of a TLB-flush by another thread, redo the load: */
} while (unlikely(context != mm->context));
}
#define deactivate_mm(tsk,mm) do { } while (0)
/*
* Switch from address space PREV to address space NEXT.
*/
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
delayed_tlb_flush();
/*
* We may get interrupts here, but that's OK because interrupt handlers cannot
* touch user-space.
*/
ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
activate_context(next);
}
#define switch_mm(prev_mm,next_mm,next_task) activate_mm(prev_mm, next_mm)
# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */

View File

@@ -0,0 +1,33 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000,2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (c) 2002 NEC Corp.
* Copyright (c) 2002 Erich Focht <efocht@ess.nec.de>
* Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
*/
#ifndef _ASM_IA64_MMZONE_H
#define _ASM_IA64_MMZONE_H
#include <linux/config.h>
#include <asm/page.h>
#include <asm/meminit.h>
#ifdef CONFIG_DISCONTIGMEM
#ifdef CONFIG_IA64_DIG /* DIG systems are small */
# define MAX_PHYSNODE_ID 8
# define NR_NODES 8
# define NR_NODE_MEMBLKS (NR_NODES * 8)
#else /* sn2 is the biggest case, so we use that if !DIG */
# define MAX_PHYSNODE_ID 2048
# define NR_NODES 256
# define NR_NODE_MEMBLKS (NR_NODES * 4)
#endif
#else /* CONFIG_DISCONTIGMEM */
# define NR_NODE_MEMBLKS 4
#endif /* CONFIG_DISCONTIGMEM */
#endif /* _ASM_IA64_MMZONE_H */

View File

@@ -0,0 +1,35 @@
#ifndef _ASM_IA64_MODULE_H
#define _ASM_IA64_MODULE_H
/*
* IA-64-specific support for kernel module loader.
*
* Copyright (C) 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
struct elf64_shdr; /* forward declaration */
struct mod_arch_specific {
struct elf64_shdr *core_plt; /* core PLT section */
struct elf64_shdr *init_plt; /* init PLT section */
struct elf64_shdr *got; /* global offset table */
struct elf64_shdr *opd; /* official procedure descriptors */
struct elf64_shdr *unwind; /* unwind-table section */
unsigned long gp; /* global-pointer for module */
void *core_unw_table; /* core unwind-table cookie returned by unwinder */
void *init_unw_table; /* init unwind-table cookie returned by unwinder */
unsigned int next_got_entry; /* index of next available got entry */
};
#define Elf_Shdr Elf64_Shdr
#define Elf_Sym Elf64_Sym
#define Elf_Ehdr Elf64_Ehdr
#define MODULE_PROC_FAMILY "ia64"
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
#define ARCH_SHF_SMALL SHF_IA_64_SHORT
#endif /* _ASM_IA64_MODULE_H */

View File

@@ -0,0 +1,27 @@
#ifndef _ASM_IA64_MSGBUF_H
#define _ASM_IA64_MSGBUF_H
/*
* The msqid64_ds structure for IA-64 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 2 miscellaneous 64-bit values
*/
struct msqid64_ds {
struct ipc64_perm msg_perm;
__kernel_time_t msg_stime; /* last msgsnd time */
__kernel_time_t msg_rtime; /* last msgrcv time */
__kernel_time_t msg_ctime; /* last change time */
unsigned long msg_cbytes; /* current number of bytes on queue */
unsigned long msg_qnum; /* number of messages in queue */
unsigned long msg_qbytes; /* max number of bytes on queue */
__kernel_pid_t msg_lspid; /* pid of last msgsnd */
__kernel_pid_t msg_lrpid; /* last receive pid */
unsigned long __unused1;
unsigned long __unused2;
};
#endif /* _ASM_IA64_MSGBUF_H */

View File

@@ -0,0 +1,21 @@
/*
* Copyright (C) 2003-2004 Intel
* Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
*/
#ifndef ASM_MSI_H
#define ASM_MSI_H
#define NR_VECTORS NR_IRQS
#define FIRST_DEVICE_VECTOR IA64_FIRST_DEVICE_VECTOR
#define LAST_DEVICE_VECTOR IA64_LAST_DEVICE_VECTOR
static inline void set_intr_gate (int nr, void *func) {}
#define IO_APIC_VECTOR(irq) (irq)
#define ack_APIC_irq ia64_eoi
#define irq_desc _irq_desc
#define cpu_mask_to_apicid(mask) cpu_physical_id(first_cpu(mask))
#define MSI_DEST_MODE MSI_PHYSICAL_MODE
#define MSI_TARGET_CPU ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
#define MSI_TARGET_CPU_SHIFT 4
#endif /* ASM_MSI_H */

View File

@@ -0,0 +1,25 @@
#ifndef _ASM_IA64_NAMEI_H
#define _ASM_IA64_NAMEI_H
/*
* Modified 1998, 1999, 2001
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
#include <asm/ptrace.h>
#include <asm/system.h>
#define EMUL_PREFIX_LINUX_IA32 "/emul/ia32-linux/"
static inline char *
__emul_prefix (void)
{
switch (current->personality) {
case PER_LINUX32:
return EMUL_PREFIX_LINUX_IA32;
default:
return NULL;
}
}
#endif /* _ASM_IA64_NAMEI_H */

View File

@@ -0,0 +1,52 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000 Silicon Graphics, Inc. All rights reserved.
* Copyright (c) 2002 NEC Corp.
* Copyright (c) 2002 Erich Focht <efocht@ess.nec.de>
* Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
*/
#ifndef _ASM_IA64_NODEDATA_H
#define _ASM_IA64_NODEDATA_H
#include <linux/config.h>
#include <linux/numa.h>
#include <asm/percpu.h>
#include <asm/mmzone.h>
#ifdef CONFIG_DISCONTIGMEM
/*
* Node Data. One of these structures is located on each node of a NUMA system.
*/
struct pglist_data;
struct ia64_node_data {
short active_cpu_count;
short node;
struct pglist_data *pg_data_ptrs[NR_NODES];
};
/*
* Return a pointer to the node_data structure for the executing cpu.
*/
#define local_node_data (local_cpu_data->node_data)
/*
* Given a node id, return a pointer to the pg_data_t for the node.
*
* NODE_DATA - should be used in all code not related to system
* initialization. It uses pernode data structures to minimize
* offnode memory references. However, these structures are not
* present during boot. This macro can be used once cpu_init
* completes.
*/
#define NODE_DATA(nid) (local_node_data->pg_data_ptrs[nid])
#endif /* CONFIG_DISCONTIGMEM */
#endif /* _ASM_IA64_NODEDATA_H */

View File

@@ -0,0 +1,74 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* This file contains NUMA specific prototypes and definitions.
*
* 2002/08/05 Erich Focht <efocht@ess.nec.de>
*
*/
#ifndef _ASM_IA64_NUMA_H
#define _ASM_IA64_NUMA_H
#include <linux/config.h>
#ifdef CONFIG_NUMA
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/numa.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <asm/mmzone.h>
extern u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
/* Stuff below this line could be architecture independent */
extern int num_node_memblks; /* total number of memory chunks */
/*
* List of node memory chunks. Filled when parsing SRAT table to
* obtain information about memory nodes.
*/
struct node_memblk_s {
unsigned long start_paddr;
unsigned long size;
int nid; /* which logical node contains this chunk? */
int bank; /* which mem bank on this node */
};
struct node_cpuid_s {
u16 phys_id; /* id << 8 | eid */
int nid; /* logical node containing this CPU */
};
extern struct node_memblk_s node_memblk[NR_NODE_MEMBLKS];
extern struct node_cpuid_s node_cpuid[NR_CPUS];
/*
* ACPI 2.0 SLIT (System Locality Information Table)
* http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
*
* This is a matrix with "distances" between nodes; they should be
* proportional to the memory access latency ratios.
*/
extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
#define node_distance(from,to) (numa_slit[(from) * numnodes + (to)])
extern int paddr_to_nid(unsigned long paddr);
#define local_nodeid (cpu_to_node_map[smp_processor_id()])
#else /* !CONFIG_NUMA */
#define paddr_to_nid(addr) 0
#endif /* CONFIG_NUMA */
#endif /* _ASM_IA64_NUMA_H */
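The SLIT-based node_distance() macro and the cpu_to_node_map[] array above are the building blocks for locality decisions. A minimal sketch of how they might be consumed follows; the helper name is invented, and the use of the 2.6-era global `numnodes` node count is an assumption of the sketch, not something this header guarantees.

/* Hypothetical helper: find the closest remote node to a CPU's home node. */
static int nearest_remote_node (int cpu)
{
	int home = cpu_to_node_map[cpu];	/* logical node running this CPU */
	int best = -1, node;

	for (node = 0; node < numnodes; node++) {
		if (node == home)
			continue;
		if (best < 0 || node_distance(home, node) < node_distance(home, best))
			best = node;
	}
	return best;	/* -1 on single-node systems */
}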

View File

@@ -0,0 +1,15 @@
#ifndef _ASM_MAX_NUMNODES_H
#define _ASM_MAX_NUMNODES_H
#ifdef CONFIG_IA64_DIG
/* Max 8 Nodes */
#define NODES_SHIFT 3
#elif defined(CONFIG_IA64_HP_ZX1)
/* Max 32 Nodes */
#define NODES_SHIFT 5
#elif defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
/* Max 256 Nodes */
#define NODES_SHIFT 8
#endif
#endif /* _ASM_MAX_NUMNODES_H */

View File

@@ -0,0 +1,196 @@
#ifndef _ASM_IA64_PAGE_H
#define _ASM_IA64_PAGE_H
/*
* Pagetable related stuff.
*
* Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
#include <asm/intrinsics.h>
#include <asm/types.h>
/*
* PAGE_SHIFT determines the actual kernel page size.
*/
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define PAGE_SHIFT 12
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define PAGE_SHIFT 13
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define PAGE_SHIFT 14
#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
# define PAGE_SHIFT 16
#else
# error Unsupported page size!
#endif
#define PAGE_SIZE (__IA64_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
#define PERCPU_PAGE_SHIFT 16 /* log2() of max. size of per-CPU area */
#define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
#define RGN_MAP_LIMIT ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE) /* per region addr limit */
#ifdef CONFIG_HUGETLB_PAGE
# define REGION_HPAGE (4UL) /* note: this is hardcoded in reload_context()!*/
# define REGION_SHIFT 61
# define HPAGE_REGION_BASE (REGION_HPAGE << REGION_SHIFT)
# define HPAGE_SHIFT hpage_shift
# define HPAGE_SHIFT_DEFAULT 28 /* check ia64 SDM for architecture supported size */
# define HPAGE_SIZE (__IA64_UL_CONST(1) << HPAGE_SHIFT)
# define HPAGE_MASK (~(HPAGE_SIZE - 1))
# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
# define ARCH_HAS_HUGEPAGE_ONLY_RANGE
#endif /* CONFIG_HUGETLB_PAGE */
#ifdef __ASSEMBLY__
# define __pa(x) ((x) - PAGE_OFFSET)
# define __va(x) ((x) + PAGE_OFFSET)
#else /* !__ASSEMBLY */
# ifdef __KERNEL__
# define STRICT_MM_TYPECHECKS
extern void clear_page (void *page);
extern void copy_page (void *to, void *from);
/*
* clear_user_page() and copy_user_page() can't be inline functions because
* flush_dcache_page() can't be defined until later...
*/
#define clear_user_page(addr, vaddr, page) \
do { \
clear_page(addr); \
flush_dcache_page(page); \
} while (0)
#define copy_user_page(to, from, vaddr, page) \
do { \
copy_page((to), (from)); \
flush_dcache_page(page); \
} while (0)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#ifdef CONFIG_VIRTUAL_MEM_MAP
extern int ia64_pfn_valid (unsigned long pfn);
#else
# define ia64_pfn_valid(pfn) 1
#endif
#ifndef CONFIG_DISCONTIGMEM
# define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
# define page_to_pfn(page) ((unsigned long) (page - mem_map))
# define pfn_to_page(pfn) (mem_map + (pfn))
#else
extern struct page *vmem_map;
extern unsigned long max_low_pfn;
# define pfn_valid(pfn) (((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
# define page_to_pfn(page) ((unsigned long) (page - vmem_map))
# define pfn_to_page(pfn) (vmem_map + (pfn))
#endif
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
typedef union ia64_va {
struct {
unsigned long off : 61; /* intra-region offset */
unsigned long reg : 3; /* region number */
} f;
unsigned long l;
void *p;
} ia64_va;
/*
* Note: These macros depend on the fact that PAGE_OFFSET has all
* region bits set to 1 and all other bits set to zero. They are
* expressed in this way to ensure they result in a single "dep"
* instruction.
*/
#define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
#define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
#define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
#define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
#define REGION_SIZE REGION_NUMBER(1)
#define REGION_KERNEL 7
#ifdef CONFIG_HUGETLB_PAGE
# define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \
| (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
# define is_hugepage_only_range(addr, len) \
(REGION_NUMBER(addr) == REGION_HPAGE && \
REGION_NUMBER((addr)+(len)) == REGION_HPAGE)
extern unsigned int hpage_shift;
#endif
static __inline__ int
get_order (unsigned long size)
{
long double d = size - 1;
long order;
order = ia64_getf_exp(d);
order = order - PAGE_SHIFT - 0xffff + 1;
if (order < 0)
order = 0;
return order;
}
# endif /* __KERNEL__ */
#endif /* !__ASSEMBLY__ */
#ifdef STRICT_MM_TYPECHECKS
/*
* These are used to make use of C type-checking..
*/
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
# define pte_val(x) ((x).pte)
# define pmd_val(x) ((x).pmd)
# define pgd_val(x) ((x).pgd)
# define pgprot_val(x) ((x).pgprot)
# define __pte(x) ((pte_t) { (x) } )
# define __pgprot(x) ((pgprot_t) { (x) } )
#else /* !STRICT_MM_TYPECHECKS */
/*
* .. while these make it easier on the compiler
*/
# ifndef __ASSEMBLY__
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;
# endif
# define pte_val(x) (x)
# define pmd_val(x) (x)
# define pgd_val(x) (x)
# define pgprot_val(x) (x)
# define __pte(x) (x)
# define __pgd(x) (x)
# define __pgprot(x) (x)
#endif /* !STRICT_MM_TYPECHECKS */
#define PAGE_OFFSET __IA64_UL_CONST(0xe000000000000000)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \
(((current->personality & READ_IMPLIES_EXEC) != 0) \
? VM_EXEC : 0))
#endif /* _ASM_IA64_PAGE_H */
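The region/offset split that __pa(), __va() and REGION_NUMBER() perform above can be illustrated with a small stand-alone user-space program. This is an editorial sketch that merely restates the ia64_va bit-field trick; it assumes a 64-bit little-endian compiler laying out the bit-field the same way gcc does on ia64.

#include <stdio.h>

/* Stand-alone copy of the ia64_va union, for illustration only. */
typedef union {
	struct {
		unsigned long off : 61;	/* intra-region offset */
		unsigned long reg : 3;	/* region number */
	} f;
	unsigned long l;
} demo_va;

int main (void)
{
	demo_va v;

	v.l = 0xe000000000001000UL;	/* a region-7 (kernel identity-mapped) address */
	printf("region %lu, offset 0x%lx\n",
	       (unsigned long) v.f.reg, (unsigned long) v.f.off);

	v.f.reg = 0;		/* what __pa() does: clear the region bits */
	printf("__pa  -> 0x%lx\n", v.l);	/* prints 0x1000 */

	v.f.reg = -1;		/* what __va() does: set all region bits (region 7) */
	printf("__va  -> 0x%lx\n", v.l);	/* prints 0xe000000000001000 */
	return 0;
}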

File diff suppressed because it is too large

View File

@@ -0,0 +1,42 @@
#ifndef _ASM_IA64_PARAM_H
#define _ASM_IA64_PARAM_H
/*
* Fundamental kernel parameters.
*
* Based on <asm-i386/param.h>.
*
* Modified 1998, 1999, 2002-2003
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
#define EXEC_PAGESIZE 65536
#ifndef NOGROUP
# define NOGROUP (-1)
#endif
#define MAXHOSTNAMELEN 64 /* max length of hostname */
#ifdef __KERNEL__
# include <linux/config.h> /* mustn't include <linux/config.h> outside of #ifdef __KERNEL__ */
# ifdef CONFIG_IA64_HP_SIM
/*
* Yeah, simulating stuff is slow, so let us catch some breath between
* timer interrupts...
*/
# define HZ 32
# else
# define HZ 1024
# endif
# define USER_HZ HZ
# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
#else
/*
* Technically, this is wrong, but some old apps still refer to it. The proper way to
* get the HZ value is via sysconf(_SC_CLK_TCK).
*/
# define HZ 1024
#endif
#endif /* _ASM_IA64_PARAM_H */

View File

@@ -0,0 +1,20 @@
/*
* parport.h: platform-specific PC-style parport initialisation
*
* Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
*
* This file should only be included by drivers/parport/parport_pc.c.
*/
#ifndef _ASM_IA64_PARPORT_H
#define _ASM_IA64_PARPORT_H 1
static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
static int __devinit
parport_pc_find_nonpci_ports (int autoirq, int autodma)
{
return parport_pc_find_isa_ports(autoirq, autodma);
}
#endif /* _ASM_IA64_PARPORT_H */

View File

@@ -0,0 +1,25 @@
#ifndef _ASM_IA64_PATCH_H
#define _ASM_IA64_PATCH_H
/*
* Copyright (C) 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* There are a number of reasons for patching instructions. Rather than duplicating code
* all over the place, we put the common stuff here. Reasons for patching: in-kernel
* module-loader, virtual-to-physical patch-list, McKinley Errata 9 workaround, and gate
* shared library. Undoubtedly, some of these reasons will disappear and others will
* be added over time.
*/
#include <linux/elf.h>
#include <linux/types.h>
extern void ia64_patch (u64 insn_addr, u64 mask, u64 val); /* patch any insn slot */
extern void ia64_patch_imm64 (u64 insn_addr, u64 val); /* patch "movl" w/abs. value*/
extern void ia64_patch_imm60 (u64 insn_addr, u64 val); /* patch "brl" w/ip-rel value */
extern void ia64_patch_mckinley_e9 (unsigned long start, unsigned long end);
extern void ia64_patch_vtop (unsigned long start, unsigned long end);
extern void ia64_patch_gate (void);
#endif /* _ASM_IA64_PATCH_H */

View File

@@ -0,0 +1,132 @@
#ifndef _ASM_IA64_PCI_H
#define _ASM_IA64_PCI_H
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
/*
* Can be used to override the logic in pci_scan_bus for skipping already-configured bus
* numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
* loader.
*/
#define pcibios_assign_all_busses() 0
#define pcibios_scan_all_fns(a, b) 0
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
void pcibios_config_init(void);
struct pci_dev;
/*
* PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct correspondence
* between device bus addresses and CPU physical addresses. Platforms with a hardware I/O
* MMU _must_ turn this off to suppress the bounce buffer handling code in the block and
* network device layers. Platforms with separate bus address spaces _must_ turn this off
* and provide a device DMA mapping implementation that takes care of the necessary
* address translation.
*
* For now, the ia64 platforms which may have separate/multiple bus address spaces all
* have I/O MMUs which support the merging of physically discontiguous buffers, so we can
* use that as the sole factor to determine the setting of PCI_DMA_BUS_IS_PHYS.
*/
extern unsigned long ia64_max_iommu_merge_mask;
#define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL)
static inline void
pcibios_set_master (struct pci_dev *dev)
{
/* No special bus mastering setup handling */
}
static inline void
pcibios_penalize_isa_irq (int irq)
{
/* We don't do dynamic PCI IRQ allocation */
}
#define HAVE_ARCH_PCI_MWI 1
extern int pcibios_prep_mwi (struct pci_dev *);
#include <asm-generic/pci-dma-compat.h>
/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME) \
((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME) \
((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
(((PTR)->LEN_NAME) = (VAL))
/* The ia64 platform always supports 64-bit addressing. */
#define pci_dac_dma_supported(pci_dev, mask) (1)
#define pci_dac_page_to_dma(dev,pg,off,dir) ((dma_addr_t) page_to_bus(pg) + (off))
#define pci_dac_dma_to_page(dev,dma_addr) (virt_to_page(bus_to_virt(dma_addr)))
#define pci_dac_dma_to_offset(dev,dma_addr) offset_in_page(dma_addr)
#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir) do { } while (0)
#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir) do { mb(); } while (0)
#define sg_dma_len(sg) ((sg)->dma_length)
#define sg_dma_address(sg) ((sg)->dma_address)
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
struct pci_window {
struct resource resource;
u64 offset;
};
struct pci_controller {
void *acpi_handle;
void *iommu;
int segment;
unsigned int windows;
struct pci_window *window;
void *platform_data;
};
#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
#define pci_domain_nr(busdev) (PCI_CONTROLLER(busdev)->segment)
extern struct pci_ops pci_root_ops;
static inline int pci_name_bus(char *name, struct pci_bus *bus)
{
if (pci_domain_nr(bus) == 0) {
sprintf(name, "%02x", bus->number);
} else {
sprintf(name, "%04x:%02x", pci_domain_nr(bus), bus->number);
}
return 0;
}
static inline void pcibios_add_platform_entries(struct pci_dev *dev)
{
}
extern void pcibios_resource_to_bus(struct pci_dev *dev,
struct pci_bus_region *region, struct resource *res);
extern void pcibios_bus_to_resource(struct pci_dev *dev,
struct resource *res, struct pci_bus_region *region);
#define pcibios_scan_all_fns(a, b) 0
#endif /* _ASM_IA64_PCI_H */
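As a rough illustration of the controller plumbing above (not code from this commit), a consumer reaches the per-bus pci_controller through the sysdata pointer. The helper below is hypothetical and assumes the usual struct pci_dev/struct pci_bus fields and PCI_SLOT()/PCI_FUNC() macros from <linux/pci.h>.

/* Hypothetical: report the segment and window count behind a device. */
static void demo_report_controller (struct pci_dev *dev)
{
	struct pci_controller *ctrl = PCI_CONTROLLER(dev->bus);

	printk(KERN_INFO "PCI %04x:%02x:%02x.%d: %u address window(s)\n",
	       pci_domain_nr(dev->bus), dev->bus->number,
	       PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
	       ctrl->windows);
}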

View File

@@ -0,0 +1,72 @@
#ifndef _ASM_IA64_PERCPU_H
#define _ASM_IA64_PERCPU_H
/*
* Copyright (C) 2002-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE
#ifdef __ASSEMBLY__
# define THIS_CPU(var) (per_cpu__##var) /* use this to mark accesses to per-CPU variables... */
#else /* !__ASSEMBLY__ */
#include <linux/config.h>
#include <linux/threads.h>
#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
# define __SMALL_ADDR_AREA __attribute__((__model__ (__small__)))
#else
# define __SMALL_ADDR_AREA
#endif
#define DECLARE_PER_CPU(type, name) \
extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
/* Separate out the type, so (int[3], foo) works. */
#define DEFINE_PER_CPU(type, name) \
__attribute__((__section__(".data.percpu"))) \
__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
/*
* Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
* external routine, to avoid include-hell.
*/
#ifdef CONFIG_SMP
extern unsigned long __per_cpu_offset[NR_CPUS];
/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
extern void setup_per_cpu_areas (void);
extern void *per_cpu_init(void);
#else /* ! SMP */
#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var))
#define __get_cpu_var(var) per_cpu__##var
#define per_cpu_init() (__phys_per_cpu_start)
#endif /* SMP */
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
/*
* Be extremely careful when taking the address of this variable! Due to virtual
* remapping, it is different from the canonical address returned by __get_cpu_var(var)!
* On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
* more efficient.
*/
#define __ia64_per_cpu_var(var) (per_cpu__##var)
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_PERCPU_H */
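For orientation, here is a minimal sketch of how the macros above are used by the rest of the kernel in this era; the variable name is invented for the example, and the usual rule applies that __get_cpu_var() must only be used with preemption disabled.

DEFINE_PER_CPU(unsigned long, demo_counter);	/* one instance per CPU, in .data.percpu */

static void demo_count_local (void)
{
	__get_cpu_var(demo_counter)++;		/* this CPU's copy; preemption must be off */
}

static unsigned long demo_count_total (void)
{
	unsigned long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			sum += per_cpu(demo_counter, cpu);	/* another CPU's copy */
	return sum;
}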

View File

@@ -0,0 +1,259 @@
/*
* Copyright (C) 2001-2003 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*/
#ifndef _ASM_IA64_PERFMON_H
#define _ASM_IA64_PERFMON_H
/*
* perfmon commands supported on all CPU models
*/
#define PFM_WRITE_PMCS 0x01
#define PFM_WRITE_PMDS 0x02
#define PFM_READ_PMDS 0x03
#define PFM_STOP 0x04
#define PFM_START 0x05
#define PFM_ENABLE 0x06 /* obsolete */
#define PFM_DISABLE 0x07 /* obsolete */
#define PFM_CREATE_CONTEXT 0x08
#define PFM_DESTROY_CONTEXT 0x09 /* obsolete use close() */
#define PFM_RESTART 0x0a
#define PFM_PROTECT_CONTEXT 0x0b /* obsolete */
#define PFM_GET_FEATURES 0x0c
#define PFM_DEBUG 0x0d
#define PFM_UNPROTECT_CONTEXT 0x0e /* obsolete */
#define PFM_GET_PMC_RESET_VAL 0x0f
#define PFM_LOAD_CONTEXT 0x10
#define PFM_UNLOAD_CONTEXT 0x11
/*
* PMU model specific commands (may not be supported on all PMU models)
*/
#define PFM_WRITE_IBRS 0x20
#define PFM_WRITE_DBRS 0x21
/*
* context flags
*/
#define PFM_FL_NOTIFY_BLOCK 0x01 /* block task on user level notifications */
#define PFM_FL_SYSTEM_WIDE 0x02 /* create a system wide context */
#define PFM_FL_OVFL_NO_MSG 0x80 /* do not post overflow/end messages for notification */
/*
* event set flags
*/
#define PFM_SETFL_EXCL_IDLE 0x01 /* exclude idle task (syswide only) XXX: DO NOT USE YET */
/*
* PMC flags
*/
#define PFM_REGFL_OVFL_NOTIFY 0x1 /* send notification on overflow */
#define PFM_REGFL_RANDOM 0x2 /* randomize sampling interval */
/*
* PMD/PMC/IBR/DBR return flags (ignored on input)
*
* Those flags are used on output and must be checked in case EAGAIN is returned
* by any of the calls using a pfarg_reg_t or pfarg_dbreg_t structure.
*/
#define PFM_REG_RETFL_NOTAVAIL (1UL<<31) /* set if register is implemented but not available */
#define PFM_REG_RETFL_EINVAL (1UL<<30) /* set if register entry is invalid */
#define PFM_REG_RETFL_MASK (PFM_REG_RETFL_NOTAVAIL|PFM_REG_RETFL_EINVAL)
#define PFM_REG_HAS_ERROR(flag) (((flag) & PFM_REG_RETFL_MASK) != 0)
typedef unsigned char pfm_uuid_t[16]; /* custom sampling buffer identifier type */
/*
* Request structure used to define a context
*/
typedef struct {
pfm_uuid_t ctx_smpl_buf_id; /* which buffer format to use (if needed) */
unsigned long ctx_flags; /* noblock/block */
unsigned short ctx_nextra_sets; /* number of extra event sets (you always get 1) */
unsigned short ctx_reserved1; /* for future use */
int ctx_fd; /* return arg: unique identification for context */
void *ctx_smpl_vaddr; /* return arg: virtual address of sampling buffer, if used */
unsigned long ctx_reserved2[11];/* for future use */
} pfarg_context_t;
/*
* Request structure used to write/read a PMC or PMD
*/
typedef struct {
unsigned int reg_num; /* which register */
unsigned short reg_set; /* event set for this register */
unsigned short reg_reserved1; /* for future use */
unsigned long reg_value; /* initial pmc/pmd value */
unsigned long reg_flags; /* input: pmc/pmd flags, return: reg error */
unsigned long reg_long_reset; /* reset after buffer overflow notification */
unsigned long reg_short_reset; /* reset after counter overflow */
unsigned long reg_reset_pmds[4]; /* which other counters to reset on overflow */
unsigned long reg_random_seed; /* seed value when randomization is used */
unsigned long reg_random_mask; /* bitmask used to limit random value */
unsigned long reg_last_reset_val;/* return: PMD last reset value */
unsigned long reg_smpl_pmds[4]; /* which pmds are accessed when PMC overflows */
unsigned long reg_smpl_eventid; /* opaque sampling event identifier */
unsigned long reg_reserved2[3]; /* for future use */
} pfarg_reg_t;
typedef struct {
unsigned int dbreg_num; /* which debug register */
unsigned short dbreg_set; /* event set for this register */
unsigned short dbreg_reserved1; /* for future use */
unsigned long dbreg_value; /* value for debug register */
unsigned long dbreg_flags; /* return: dbreg error */
unsigned long dbreg_reserved2[1]; /* for future use */
} pfarg_dbreg_t;
typedef struct {
unsigned int ft_version; /* perfmon: major [16-31], minor [0-15] */
unsigned int ft_reserved; /* reserved for future use */
unsigned long reserved[4]; /* for future use */
} pfarg_features_t;
typedef struct {
pid_t load_pid; /* process to load the context into */
unsigned short load_set; /* first event set to load */
unsigned short load_reserved1; /* for future use */
unsigned long load_reserved2[3]; /* for future use */
} pfarg_load_t;
typedef struct {
int msg_type; /* generic message header */
int msg_ctx_fd; /* generic message header */
unsigned long msg_ovfl_pmds[4]; /* which PMDs overflowed */
unsigned short msg_active_set; /* active set at the time of overflow */
unsigned short msg_reserved1; /* for future use */
unsigned int msg_reserved2; /* for future use */
unsigned long msg_tstamp; /* for perf tuning/debug */
} pfm_ovfl_msg_t;
typedef struct {
int msg_type; /* generic message header */
int msg_ctx_fd; /* generic message header */
unsigned long msg_tstamp; /* for perf tuning */
} pfm_end_msg_t;
typedef struct {
int msg_type; /* type of the message */
int msg_ctx_fd; /* unique identifier for the context */
unsigned long msg_tstamp; /* for perf tuning */
} pfm_gen_msg_t;
#define PFM_MSG_OVFL 1 /* an overflow happened */
#define PFM_MSG_END 2 /* task to which context was attached ended */
typedef union {
pfm_ovfl_msg_t pfm_ovfl_msg;
pfm_end_msg_t pfm_end_msg;
pfm_gen_msg_t pfm_gen_msg;
} pfm_msg_t;
/*
* Define the version numbers for both perfmon as a whole and the sampling buffer format.
*/
#define PFM_VERSION_MAJ 2U
#define PFM_VERSION_MIN 0U
#define PFM_VERSION (((PFM_VERSION_MAJ&0xffff)<<16)|(PFM_VERSION_MIN & 0xffff))
#define PFM_VERSION_MAJOR(x) (((x)>>16) & 0xffff)
#define PFM_VERSION_MINOR(x) ((x) & 0xffff)
/*
* miscellaneous architected definitions
*/
#define PMU_FIRST_COUNTER 4 /* first counting monitor (PMC/PMD) */
#define PMU_MAX_PMCS 256 /* maximum architected number of PMC registers */
#define PMU_MAX_PMDS 256 /* maximum architected number of PMD registers */
#ifdef __KERNEL__
extern long perfmonctl(int fd, int cmd, void *arg, int narg);
extern void pfm_save_regs (struct task_struct *);
extern void pfm_load_regs (struct task_struct *);
extern void pfm_exit_thread(struct task_struct *);
extern int pfm_use_debug_registers(struct task_struct *);
extern int pfm_release_debug_registers(struct task_struct *);
extern void pfm_syst_wide_update_task(struct task_struct *, unsigned long info, int is_ctxswin);
extern void pfm_inherit(struct task_struct *task, struct pt_regs *regs);
extern void pfm_init_percpu(void);
extern void pfm_handle_work(void);
/*
* Reset PMD register flags
*/
#define PFM_PMD_SHORT_RESET 0
#define PFM_PMD_LONG_RESET 1
typedef union {
unsigned int val;
struct {
unsigned int notify_user:1; /* notify user program of overflow */
unsigned int reset_ovfl_pmds:1; /* reset overflowed PMDs */
unsigned int block_task:1; /* block monitored task on kernel exit */
unsigned int mask_monitoring:1; /* mask monitors via PMCx.plm */
unsigned int reserved:28; /* for future use */
} bits;
} pfm_ovfl_ctrl_t;
typedef struct {
unsigned char ovfl_pmd; /* index of overflowed PMD */
unsigned char ovfl_notify; /* =1 if monitor requested overflow notification */
unsigned short active_set; /* event set active at the time of the overflow */
pfm_ovfl_ctrl_t ovfl_ctrl; /* return: perfmon controls to set by handler */
unsigned long pmd_last_reset; /* last reset value of the PMD */
unsigned long smpl_pmds[4]; /* bitmask of other PMD of interest on overflow */
unsigned long smpl_pmds_values[PMU_MAX_PMDS]; /* values for the other PMDs of interest */
unsigned long pmd_value; /* current 64-bit value of the PMD */
unsigned long pmd_eventid; /* eventid associated with PMD */
} pfm_ovfl_arg_t;
typedef struct {
char *fmt_name;
pfm_uuid_t fmt_uuid;
size_t fmt_arg_size;
unsigned long fmt_flags;
int (*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg);
int (*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size);
int (*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg);
int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp);
int (*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
int (*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
int (*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs);
struct list_head fmt_list;
} pfm_buffer_fmt_t;
extern int pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt);
extern int pfm_unregister_buffer_fmt(pfm_uuid_t uuid);
/*
* perfmon interface exported to modules
*/
extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
/*
* describe the content of the local_cpu_data->pfm_syst_info field
*/
#define PFM_CPUINFO_SYST_WIDE 0x1 /* if set a system wide session exists */
#define PFM_CPUINFO_DCR_PP 0x2 /* if set the system wide session has started */
#define PFM_CPUINFO_EXCL_IDLE 0x4 /* the system wide session excludes the idle task */
#endif /* __KERNEL__ */
#endif /* _ASM_IA64_PERFMON_H */
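A hedged user-level sketch of the command flow implied by the structures above (create a context, program a PMC/PMD pair, attach, start, read). It assumes the perfmon-2.0 convention that PFM_CREATE_CONTEXT returns the context file descriptor in ctx_fd, that a perfmonctl() syscall wrapper (e.g. from libpfm) is available, and it deliberately leaves the PMU-model-specific event encoding blank; error handling is elided.

static int demo_count_event (void)
{
	pfarg_context_t ctx;
	pfarg_reg_t pc, pd;
	pfarg_load_t load;
	int fd;

	memset(&ctx, 0, sizeof(ctx));
	perfmonctl(0, PFM_CREATE_CONTEXT, &ctx, 1);
	fd = ctx.ctx_fd;			/* the context is identified by a file descriptor */

	memset(&pc, 0, sizeof(pc));
	pc.reg_num   = PMU_FIRST_COUNTER;	/* first counting PMC/PMD pair */
	pc.reg_value = 0 /* PMU-model-specific event encoding goes here */;
	perfmonctl(fd, PFM_WRITE_PMCS, &pc, 1);

	memset(&pd, 0, sizeof(pd));
	pd.reg_num = PMU_FIRST_COUNTER;		/* the PMD that accumulates the count */
	perfmonctl(fd, PFM_WRITE_PMDS, &pd, 1);

	memset(&load, 0, sizeof(load));
	load.load_pid = getpid();		/* attach the context to the calling thread */
	perfmonctl(fd, PFM_LOAD_CONTEXT, &load, 1);

	perfmonctl(fd, PFM_START, NULL, 0);
	/* ... run the workload being measured ... */
	perfmonctl(fd, PFM_STOP, NULL, 0);

	perfmonctl(fd, PFM_READ_PMDS, &pd, 1);	/* pd.reg_value now holds the count */
	return fd;
}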

View File

@@ -0,0 +1,83 @@
/*
* Copyright (C) 2002-2003 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*
* This file implements the default sampling buffer format
* for Linux/ia64 perfmon subsystem.
*/
#ifndef __PERFMON_DEFAULT_SMPL_H__
#define __PERFMON_DEFAULT_SMPL_H__ 1
#define PFM_DEFAULT_SMPL_UUID { \
0x4d, 0x72, 0xbe, 0xc0, 0x06, 0x64, 0x41, 0x43, 0x82, 0xb4, 0xd3, 0xfd, 0x27, 0x24, 0x3c, 0x97}
/*
* format specific parameters (passed at context creation)
*/
typedef struct {
unsigned long buf_size; /* size of the buffer in bytes */
unsigned int flags; /* buffer specific flags */
unsigned int res1; /* for future use */
unsigned long reserved[2]; /* for future use */
} pfm_default_smpl_arg_t;
/*
* combined context+format specific structure. Can be passed
* to PFM_CREATE_CONTEXT
*/
typedef struct {
pfarg_context_t ctx_arg;
pfm_default_smpl_arg_t buf_arg;
} pfm_default_smpl_ctx_arg_t;
/*
* This header is at the beginning of the sampling buffer returned to the user.
* It is directly followed by the first record.
*/
typedef struct {
unsigned long hdr_count; /* how many valid entries */
unsigned long hdr_cur_offs; /* current offset from top of buffer */
unsigned long hdr_reserved2; /* reserved for future use */
unsigned long hdr_overflows; /* how many times the buffer overflowed */
unsigned long hdr_buf_size; /* how many bytes in the buffer */
unsigned int hdr_version; /* contains perfmon version (smpl format diffs) */
unsigned int hdr_reserved1; /* for future use */
unsigned long hdr_reserved[10]; /* for future use */
} pfm_default_smpl_hdr_t;
/*
* Entry header in the sampling buffer. The header is directly followed
* by the values of the PMD registers of interest, saved in increasing
* index order: PMD4, PMD5, and so on. How many PMDs are present depends
* on how the session was programmed.
*
* In the case where multiple counters overflow at the same time, multiple
* entries are written consecutively.
*
* The last_reset_val member indicates the initial value of the overflowed PMD.
*/
typedef struct {
int pid; /* thread id (for NPTL, this is gettid()) */
unsigned char reserved1[3]; /* reserved for future use */
unsigned char ovfl_pmd; /* index of overflowed PMD */
unsigned long last_reset_val; /* initial value of overflowed PMD */
unsigned long ip; /* where the overflow interrupt happened */
unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */
unsigned short cpu; /* cpu on which the overflow occurred */
unsigned short set; /* event set active when the overflow occurred */
int tgid; /* thread group id (for NPTL, this is getpid()) */
} pfm_default_smpl_entry_t;
#define PFM_DEFAULT_MAX_PMDS 64 /* how many pmds supported by data structures (8*sizeof(unsigned long)) */
#define PFM_DEFAULT_MAX_ENTRY_SIZE (sizeof(pfm_default_smpl_entry_t)+(sizeof(unsigned long)*PFM_DEFAULT_MAX_PMDS))
#define PFM_DEFAULT_SMPL_MIN_BUF_SIZE (sizeof(pfm_default_smpl_hdr_t)+PFM_DEFAULT_MAX_ENTRY_SIZE)
#define PFM_DEFAULT_SMPL_VERSION_MAJ 2U
#define PFM_DEFAULT_SMPL_VERSION_MIN 0U
#define PFM_DEFAULT_SMPL_VERSION (((PFM_DEFAULT_SMPL_VERSION_MAJ&0xffff)<<16)|(PFM_DEFAULT_SMPL_VERSION_MIN & 0xffff))
#endif /* __PERFMON_DEFAULT_SMPL_H__ */
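To make the layout above concrete, a consumer of the default sampling format walks the buffer as a header followed by variable-sized entries. The sketch below is editorial: `npmds`, the number of PMD values recorded after each entry, depends on how the session was programmed and is an assumption here.

static void demo_walk_samples (void *buf, unsigned int npmds)
{
	pfm_default_smpl_hdr_t *hdr = buf;
	pfm_default_smpl_entry_t *ent = (pfm_default_smpl_entry_t *)(hdr + 1);
	unsigned long i, entry_size;

	/* each sample: the fixed entry header plus one 64-bit value per recorded PMD */
	entry_size = sizeof(*ent) + npmds * sizeof(unsigned long);

	for (i = 0; i < hdr->hdr_count; i++) {
		/* ent->ip is the interrupted IP, ent->ovfl_pmd the overflowed PMD index */
		ent = (pfm_default_smpl_entry_t *)((char *) ent + entry_size);
	}
}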

View File

@@ -0,0 +1,177 @@
#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H
/*
* This file contains the functions and defines necessary to allocate
* page tables.
*
* This hopefully works with any (fixed) ia-64 page-size, as defined
* in <asm/page.h> (currently 8192).
*
* Copyright (C) 1998-2001 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
*/
#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/threads.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
/*
* Very stupidly, we used to get new pgd's and pmd's, init their contents
* to point to the NULL versions of the next level page table, later on
* completely re-init them the same way, then free them up. This wasted
* a lot of work and caused unnecessary memory traffic. How broken...
* We fix this by caching them.
*/
#define pgd_quicklist (local_cpu_data->pgd_quick)
#define pmd_quicklist (local_cpu_data->pmd_quick)
#define pgtable_cache_size (local_cpu_data->pgtable_cache_sz)
static inline pgd_t*
pgd_alloc_one_fast (struct mm_struct *mm)
{
unsigned long *ret = NULL;
preempt_disable();
ret = pgd_quicklist;
if (likely(ret != NULL)) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
--pgtable_cache_size;
} else
ret = NULL;
preempt_enable();
return (pgd_t *) ret;
}
static inline pgd_t*
pgd_alloc (struct mm_struct *mm)
{
/* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
pgd_t *pgd = pgd_alloc_one_fast(mm);
if (unlikely(pgd == NULL)) {
pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
if (likely(pgd != NULL))
clear_page(pgd);
}
return pgd;
}
static inline void
pgd_free (pgd_t *pgd)
{
preempt_disable();
*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
++pgtable_cache_size;
preempt_enable();
}
static inline void
pgd_populate (struct mm_struct *mm, pgd_t *pgd_entry, pmd_t *pmd)
{
pgd_val(*pgd_entry) = __pa(pmd);
}
static inline pmd_t*
pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
{
unsigned long *ret = NULL;
preempt_disable();
ret = (unsigned long *)pmd_quicklist;
if (likely(ret != NULL)) {
pmd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
--pgtable_cache_size;
}
preempt_enable();
return (pmd_t *)ret;
}
static inline pmd_t*
pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
{
pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (likely(pmd != NULL))
clear_page(pmd);
return pmd;
}
static inline void
pmd_free (pmd_t *pmd)
{
preempt_disable();
*(unsigned long *)pmd = (unsigned long) pmd_quicklist;
pmd_quicklist = (unsigned long *) pmd;
++pgtable_cache_size;
preempt_enable();
}
#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
static inline void
pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, struct page *pte)
{
pmd_val(*pmd_entry) = page_to_phys(pte);
}
static inline void
pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
{
pmd_val(*pmd_entry) = __pa(pte);
}
static inline struct page *
pte_alloc_one (struct mm_struct *mm, unsigned long addr)
{
struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
if (likely(pte != NULL))
clear_page(page_address(pte));
return pte;
}
static inline pte_t *
pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
{
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (likely(pte != NULL))
clear_page(pte);
return pte;
}
static inline void
pte_free (struct page *pte)
{
__free_page(pte);
}
static inline void
pte_free_kernel (pte_t *pte)
{
free_page((unsigned long) pte);
}
#define __pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte))
extern void check_pgt_cache (void);
#endif /* _ASM_IA64_PGALLOC_H */
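A minimal sketch of how the allocators above pair up when a page table is grown; the helper name is invented, the pgd_none()/pmd_offset() accessors come from <asm/pgtable.h>, and locking/error handling are elided.

/* Hypothetical: make sure a pmd exists for `addr' and hook it into the pgd. */
static pmd_t *
demo_grow_pmd (struct mm_struct *mm, pgd_t *pgd_entry, unsigned long addr)
{
	pmd_t *pmd;

	if (!pgd_none(*pgd_entry))
		return pmd_offset(pgd_entry, addr);

	pmd = pmd_alloc_one(mm, addr);			/* zeroed page for the new pmd */
	if (pmd != NULL)
		pgd_populate(mm, pgd_entry, pmd);	/* pgd entry now points at it */
	return pmd;
}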

View File

@@ -0,0 +1,565 @@
#ifndef _ASM_IA64_PGTABLE_H
#define _ASM_IA64_PGTABLE_H
/*
* This file contains the functions and defines necessary to modify and use
* the IA-64 page table tree.
*
* This hopefully works with any (fixed) IA-64 page-size, as defined
* in <asm/page.h>.
*
* Copyright (C) 1998-2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
#include <asm/mman.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/types.h>
#define IA64_MAX_PHYS_BITS 50 /* max. number of physical address bits (architected) */
/*
* First, define the various bits in a PTE. Note that the PTE format
* matches the VHPT short format, the first doubleword of the VHPT long
* format, and the first doubleword of the TLB insertion format.
*/
#define _PAGE_P_BIT 0
#define _PAGE_A_BIT 5
#define _PAGE_D_BIT 6
#define _PAGE_P (1 << _PAGE_P_BIT) /* page present bit */
#define _PAGE_MA_WB (0x0 << 2) /* write back memory attribute */
#define _PAGE_MA_UC (0x4 << 2) /* uncacheable memory attribute */
#define _PAGE_MA_UCE (0x5 << 2) /* UC exported attribute */
#define _PAGE_MA_WC (0x6 << 2) /* write coalescing memory attribute */
#define _PAGE_MA_NAT (0x7 << 2) /* not-a-thing attribute */
#define _PAGE_MA_MASK (0x7 << 2)
#define _PAGE_PL_0 (0 << 7) /* privilege level 0 (kernel) */
#define _PAGE_PL_1 (1 << 7) /* privilege level 1 (unused) */
#define _PAGE_PL_2 (2 << 7) /* privilege level 2 (unused) */
#define _PAGE_PL_3 (3 << 7) /* privilege level 3 (user) */
#define _PAGE_PL_MASK (3 << 7)
#define _PAGE_AR_R (0 << 9) /* read only */
#define _PAGE_AR_RX (1 << 9) /* read & execute */
#define _PAGE_AR_RW (2 << 9) /* read & write */
#define _PAGE_AR_RWX (3 << 9) /* read, write & execute */
#define _PAGE_AR_R_RW (4 << 9) /* read / read & write */
#define _PAGE_AR_RX_RWX (5 << 9) /* read & exec / read, write & exec */
#define _PAGE_AR_RWX_RW (6 << 9) /* read, write & exec / read & write */
#define _PAGE_AR_X_RX (7 << 9) /* exec & promote / read & exec */
#define _PAGE_AR_MASK (7 << 9)
#define _PAGE_AR_SHIFT 9
#define _PAGE_A (1 << _PAGE_A_BIT) /* page accessed bit */
#define _PAGE_D (1 << _PAGE_D_BIT) /* page dirty bit */
#define _PAGE_PPN_MASK (((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
#define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */
#define _PAGE_PROTNONE (__IA64_UL(1) << 63)
/* Valid only for a PTE with the present bit cleared: */
#define _PAGE_FILE (1 << 1) /* see swap & file pte remarks below */
#define _PFN_MASK _PAGE_PPN_MASK
/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
#define _PAGE_CHG_MASK (_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)
#define _PAGE_SIZE_4K 12
#define _PAGE_SIZE_8K 13
#define _PAGE_SIZE_16K 14
#define _PAGE_SIZE_64K 16
#define _PAGE_SIZE_256K 18
#define _PAGE_SIZE_1M 20
#define _PAGE_SIZE_4M 22
#define _PAGE_SIZE_16M 24
#define _PAGE_SIZE_64M 26
#define _PAGE_SIZE_256M 28
#define _PAGE_SIZE_1G 30
#define _PAGE_SIZE_4G 32
#define __ACCESS_BITS _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
#define __DIRTY_BITS_NO_ED _PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
#define __DIRTY_BITS _PAGE_ED | __DIRTY_BITS_NO_ED
/*
* Definitions for first level:
*
* PGDIR_SHIFT determines what a first-level page table entry can map.
*/
#define PGDIR_SHIFT (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD (__IA64_UL(1) << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */
#define FIRST_USER_PGD_NR 0
/*
* Definitions for second level:
*
* PMD_SHIFT determines the size of the area a second-level page table
* can map.
*/
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PTRS_PER_PMD (__IA64_UL(1) << (PAGE_SHIFT-3))
/*
* Definitions for third level:
*/
#define PTRS_PER_PTE (__IA64_UL(1) << (PAGE_SHIFT-3))
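/*
 * Editorial worked example (not part of the original header), assuming
 * CONFIG_IA64_PAGE_SIZE_16KB, i.e. PAGE_SHIFT = 14:
 *
 *   PTRS_PER_PTE = PTRS_PER_PMD = PTRS_PER_PGD = 1 << 11 = 2048
 *   PMD_SHIFT    = 14 + 11   = 25  ->  one pmd entry maps a 32 MB area
 *   PGDIR_SHIFT  = 14 + 2*11 = 36  ->  one pgd entry maps a 64 GB area
 *
 * Only PTRS_PER_PGD/8 = 256 top-level entries are used per region (the top
 * three bits of the pgd index encode the region number, see pgd_index()
 * below), so a single region can map 256 * 64 GB = 2^44 bytes, which matches
 * RGN_MAP_LIMIT from <asm/page.h> up to the final guard page.
 */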
/*
* All the normal masks have the "page accessed" bits on, as any time
* they are used, the page is accessed. They are cleared only by the
* page-out routines.
*/
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_A)
#define PAGE_SHARED __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
#define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
#define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
# ifndef __ASSEMBLY__
#include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
/*
* Next come the mappings that determine how mmap() protection bits
* (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented. The
* _P version gets used for a private shared memory segment, the _S
* version gets used for a shared memory segment with MAP_SHARED on.
* In a private shared memory segment, we do a copy-on-write if a task
* attempts to write to the page.
*/
/* xwr */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_READONLY /* write to priv pg -> copy & make writable */
#define __P011 PAGE_READONLY /* ditto */
#define __P100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __P101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC
#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED /* we don't have (and don't need) write-only */
#define __S011 PAGE_SHARED
#define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#define pmd_ERROR(e) printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pte_ERROR(e) printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
/*
* Some definitions to translate between mem_map, PTEs, and page addresses:
*/
/* Quick test to see if ADDR is a (potentially) valid physical address. */
static inline long
ia64_phys_addr_valid (unsigned long addr)
{
return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
}
/*
* kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
* memory. For the return value to be meaningful, ADDR must be >=
* PAGE_OFFSET. This operation can be relatively expensive (e.g., it
* may require a hash- or multi-level tree lookup or something of that
* sort), but it guarantees to return TRUE only if accessing the page
* at that address does not cause an error. Note that there may be
* addresses for which kern_addr_valid() returns FALSE even though an
* access would not cause an error (e.g., this is typically true for
* memory-mapped I/O regions).
*
* XXX Need to implement this for IA-64.
*/
#define kern_addr_valid(addr) (1)
/*
* Now come the defines and routines to manage and access the three-level
* page table.
*/
/*
* On some architectures, special things need to be done when setting
* the PTE in a page table. Nothing special needs to be on IA-64.
*/
#define set_pte(ptep, pteval) (*(ptep) = (pteval))
#define RGN_SIZE (1UL << 61)
#define RGN_KERNEL 7
#define VMALLOC_START 0xa000000200000000UL
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
# define VMALLOC_END vmalloc_end
extern unsigned long vmalloc_end;
#else
# define VMALLOC_END (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
#endif
/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) - 0xa000000000000000UL)
#define kc_offset_to_vaddr(o) ((o) + 0xa000000000000000UL)
/*
* Conversion functions: convert page frame number (pfn) and a protection value to a page
* table entry (pte).
*/
#define pfn_pte(pfn, pgprot) \
({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })
/* Extract pfn from pte. */
#define pte_pfn(_pte) ((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
#define pte_modify(_pte, newprot) \
(__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))
#define page_pte_prot(page,prot) mk_pte(page, prot)
#define page_pte(page) page_pte_prot(page, __pgprot(0))
#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(pte) (pte_val(*(pte)) = 0UL)
/* pte_page() returns the "struct page *" corresponding to the PTE: */
#define pte_page(pte) virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (!ia64_phys_addr_valid(pmd_val(pmd)))
#define pmd_present(pmd) (pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
#define pmd_page(pmd) virt_to_page((pmd_val(pmd) + PAGE_OFFSET))
#define pgd_none(pgd) (!pgd_val(pgd))
#define pgd_bad(pgd) (!ia64_phys_addr_valid(pgd_val(pgd)))
#define pgd_present(pgd) (pgd_val(pgd) != 0UL)
#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL)
#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
/*
* The following only have defined behavior if pte_present() is true.
*/
#define pte_user(pte) ((pte_val(pte) & _PAGE_PL_MASK) == _PAGE_PL_3)
#define pte_read(pte) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) < 6)
#define pte_write(pte) ((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte) ((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0)
#define pte_file(pte) ((pte_val(pte) & _PAGE_FILE) != 0)
/*
* Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
* access rights:
*/
#define pte_wrprotect(pte) (__pte(pte_val(pte) & ~_PAGE_AR_RW))
#define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_AR_RW))
#define pte_mkexec(pte) (__pte(pte_val(pte) | _PAGE_AR_RX))
#define pte_mkold(pte) (__pte(pte_val(pte) & ~_PAGE_A))
#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D))
/*
* Macro to mark a page protection value as "uncacheable". Note that "protection" is really a
* misnomer here as the protection value contains the memory attribute bits, dirty bits,
* and various other bits as well.
*/
#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
/*
* Macro to mark a page protection value as "write-combining".
* Note that "protection" is really a misnomer here as the protection
* value contains the memory attribute bits, dirty bits, and various
* other bits as well. Accesses through a write-combining translation
* bypass the caches, but do allow consecutive writes to be combined
* into single (but larger) write transactions.
*/
#define pgprot_writecombine(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
static inline unsigned long
pgd_index (unsigned long address)
{
unsigned long region = address >> 61;
unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
return (region << (PAGE_SHIFT - 6)) | l1index;
}
/* The offset in the 1-level directory is given by the 3 region bits
(61..63) and the level-1 bits. */
static inline pgd_t*
pgd_offset (struct mm_struct *mm, unsigned long address)
{
return mm->pgd + pgd_index(address);
}
/* In the kernel's mapped region we completely ignore the region number
(since we know it's in region number 5). */
#define pgd_offset_k(addr) \
(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
/* Look up a pgd entry in the gate area. On IA-64, the gate-area
resides in the kernel-mapped segment, hence we use pgd_offset_k()
here. */
#define pgd_offset_gate(mm, addr) pgd_offset_k(addr)
/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) \
((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
/*
* Find an entry in the third-level page table. This looks more complicated than it
* should be because some platforms place page tables in high memory.
*/
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
#define pte_offset_map_nested(dir,addr) pte_offset_map(dir, addr)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
/* atomic versions of the some PTE manipulations: */
static inline int
ptep_test_and_clear_young (pte_t *ptep)
{
#ifdef CONFIG_SMP
if (!pte_young(*ptep))
return 0;
return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
pte_t pte = *ptep;
if (!pte_young(pte))
return 0;
set_pte(ptep, pte_mkold(pte));
return 1;
#endif
}
static inline int
ptep_test_and_clear_dirty (pte_t *ptep)
{
#ifdef CONFIG_SMP
if (!pte_dirty(*ptep))
return 0;
return test_and_clear_bit(_PAGE_D_BIT, ptep);
#else
pte_t pte = *ptep;
if (!pte_dirty(pte))
return 0;
set_pte(ptep, pte_mkclean(pte));
return 1;
#endif
}
static inline pte_t
ptep_get_and_clear (pte_t *ptep)
{
#ifdef CONFIG_SMP
return __pte(xchg((long *) ptep, 0));
#else
pte_t pte = *ptep;
pte_clear(ptep);
return pte;
#endif
}
static inline void
ptep_set_wrprotect (pte_t *ptep)
{
#ifdef CONFIG_SMP
unsigned long new, old;
do {
old = pte_val(*ptep);
new = pte_val(pte_wrprotect(__pte (old)));
} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
pte_t old_pte = *ptep;
set_pte(ptep, pte_wrprotect(old_pte));
#endif
}
static inline void
ptep_mkdirty (pte_t *ptep)
{
#ifdef CONFIG_SMP
set_bit(_PAGE_D_BIT, ptep);
#else
pte_t old_pte = *ptep;
set_pte(ptep, pte_mkdirty(old_pte));
#endif
}
static inline int
pte_same (pte_t a, pte_t b)
{
return pte_val(a) == pte_val(b);
}
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);
/*
* Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
* bits in the swap-type field of the swap pte. It would be nice to
* enforce that, but we can't easily include <linux/swap.h> here.
* (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
*
* Format of swap pte:
* bit 0 : present bit (must be zero)
* bit 1 : _PAGE_FILE (must be zero)
* bits 2- 8: swap-type
* bits 9-62: swap offset
* bit 63 : _PAGE_PROTNONE bit
*
* Format of file pte:
* bit 0 : present bit (must be zero)
* bit 1 : _PAGE_FILE (must be one)
* bits 2-62: file_offset/PAGE_SIZE
* bit 63 : _PAGE_PROTNONE bit
*/
#define __swp_type(entry) (((entry).val >> 2) & 0x7f)
#define __swp_offset(entry) (((entry).val << 1) >> 10)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#define PTE_FILE_MAX_BITS 61
#define pte_to_pgoff(pte) ((pte_val(pte) << 1) >> 3)
#define pgoff_to_pte(off) ((pte_t) { ((off) << 2) | _PAGE_FILE })
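/*
 * Editorial worked example (not part of the original header): for swap
 * type 3 and swap offset 0x1234,
 *
 *   __swp_entry(3, 0x1234).val = (3 << 2) | (0x1234UL << 9) = 0x24680c
 *
 * and the accessors recover the fields:
 *
 *   __swp_type(entry)   = (0x24680c >> 2) & 0x7f  = 3       (bits 2-8)
 *   __swp_offset(entry) = (0x24680c << 1) >> 10   = 0x1234  (bits 9-62)
 *
 * Bits 0, 1 and 63 stay clear, so such a pte is neither present, a file
 * pte, nor _PAGE_PROTNONE, exactly as the format comment above requires.
 */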
/* XXX is this right? */
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
extern struct page *zero_page_memmap_ptr;
#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)
/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA
#ifdef CONFIG_HUGETLB_PAGE
#define HUGETLB_PGDIR_SHIFT (HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_SIZE (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK (~(HUGETLB_PGDIR_SIZE-1))
struct mmu_gather;
extern void hugetlb_free_pgtables(struct mmu_gather *tlb,
struct vm_area_struct * prev, unsigned long start, unsigned long end);
#endif
/*
* IA-64 doesn't have any external MMU info: the page tables contain all the necessary
* information. However, we use this routine to take care of any (delayed) i-cache
* flushing that may be necessary.
*/
extern void update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte);
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
* Update PTEP with ENTRY, which is guaranteed to be a less
* restrictive PTE. That is, ENTRY may have the ACCESSED, DIRTY, and
* WRITABLE bits turned on, when the value at PTEP did not. The
* WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
*
* SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
* having to worry about races. On SMP machines, there are only two
* cases where this is true:
*
* (1) *PTEP has the PRESENT bit turned OFF
* (2) ENTRY has the DIRTY bit turned ON
*
* On ia64, we could implement this routine with a cmpxchg()-loop
* which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
* However, like on x86, we can get a more streamlined version by
* observing that it is OK to drop ACCESSED bit updates when
* SAFELY_WRITABLE is FALSE. Besides being rare, all that would do is
* result in an extra Access-bit fault, which would then turn on the
* ACCESSED bit in the low-level fault handler (iaccess_bit or
* daccess_bit in ivt.S).
*/
#ifdef CONFIG_SMP
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
do { \
if (__safely_writable) { \
set_pte(__ptep, __entry); \
flush_tlb_page(__vma, __addr); \
} \
} while (0)
#else
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
ptep_establish(__vma, __addr, __ptep, __entry)
#endif
# ifdef CONFIG_VIRTUAL_MEM_MAP
/* arch mem_map init routine is needed due to holes in a virtual mem_map */
# define __HAVE_ARCH_MEMMAP_INIT
extern void memmap_init (unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn);
# endif /* CONFIG_VIRTUAL_MEM_MAP */
# endif /* !__ASSEMBLY__ */
/*
* Identity-mapped regions use a large page size. We'll call such large pages
* "granules". If you can think of a better name that's unambiguous, let me
* know...
*/
#if defined(CONFIG_IA64_GRANULE_64MB)
# define IA64_GRANULE_SHIFT _PAGE_SIZE_64M
#elif defined(CONFIG_IA64_GRANULE_16MB)
# define IA64_GRANULE_SHIFT _PAGE_SIZE_16M
#endif
#define IA64_GRANULE_SIZE (1 << IA64_GRANULE_SHIFT)
/*
* log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
*/
#define KERNEL_TR_PAGE_SHIFT _PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE (1 << KERNEL_TR_PAGE_SHIFT)
/*
* No page table caches to initialise
*/
#define pgtable_cache_init() do { } while (0)
/* These tell get_user_pages() that the first gate page is accessible from user-level. */
#define FIXADDR_USER_START GATE_ADDR
#define FIXADDR_USER_END (GATE_ADDR + 2*PERCPU_PAGE_SIZE)
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
#define __HAVE_ARCH_PGD_OFFSET_GATE
#include <asm-generic/pgtable.h>
#endif /* _ASM_IA64_PGTABLE_H */

View File

@@ -0,0 +1,31 @@
#ifndef _ASM_IA64_POLL_H
#define _ASM_IA64_POLL_H
/*
* poll(2) bit definitions. Based on <asm-i386/poll.h>.
*
* Modified 1998, 1999, 2002
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
#define POLLIN 0x0001
#define POLLPRI 0x0002
#define POLLOUT 0x0004
#define POLLERR 0x0008
#define POLLHUP 0x0010
#define POLLNVAL 0x0020
#define POLLRDNORM 0x0040
#define POLLRDBAND 0x0080
#define POLLWRNORM 0x0100
#define POLLWRBAND 0x0200
#define POLLMSG 0x0400
#define POLLREMOVE 0x1000
struct pollfd {
int fd;
short events;
short revents;
};
#endif /* _ASM_IA64_POLL_H */

View File

@@ -0,0 +1,126 @@
#ifndef _ASM_IA64_POSIX_TYPES_H
#define _ASM_IA64_POSIX_TYPES_H
/*
* This file is generally used by user-level software, so you need to
* be a little careful about namespace pollution etc. Also, we cannot
* assume GCC is being used.
*
* Based on <asm-alpha/posix_types.h>.
*
* Modified 1998-2000, 2003
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
typedef unsigned long __kernel_ino_t;
typedef unsigned int __kernel_mode_t;
typedef unsigned int __kernel_nlink_t;
typedef long __kernel_off_t;
typedef long long __kernel_loff_t;
typedef int __kernel_pid_t;
typedef int __kernel_ipc_pid_t;
typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
typedef unsigned long __kernel_size_t;
typedef long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
typedef long __kernel_time_t;
typedef long __kernel_suseconds_t;
typedef long __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef int __kernel_daddr_t;
typedef char * __kernel_caddr_t;
typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
typedef unsigned short __kernel_uid16_t;
typedef unsigned short __kernel_gid16_t;
typedef struct {
int val[2];
} __kernel_fsid_t;
typedef __kernel_uid_t __kernel_old_uid_t;
typedef __kernel_gid_t __kernel_old_gid_t;
typedef __kernel_uid_t __kernel_uid32_t;
typedef __kernel_gid_t __kernel_gid32_t;
typedef unsigned int __kernel_old_dev_t;
# ifdef __KERNEL__
# ifndef __GNUC__
#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
#define __FD_ISSET(d, set) (((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0)
#define __FD_ZERO(set) \
((void) memset ((__ptr_t) (set), 0, sizeof (__kernel_fd_set)))
# else /* !__GNUC__ */
/* With GNU C, use inline functions instead so args are evaluated only once: */
#undef __FD_SET
static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
{
unsigned long _tmp = fd / __NFDBITS;
unsigned long _rem = fd % __NFDBITS;
fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
}
#undef __FD_CLR
static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
{
unsigned long _tmp = fd / __NFDBITS;
unsigned long _rem = fd % __NFDBITS;
fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
}
#undef __FD_ISSET
static __inline__ int __FD_ISSET(unsigned long fd, const __kernel_fd_set *p)
{
unsigned long _tmp = fd / __NFDBITS;
unsigned long _rem = fd % __NFDBITS;
return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
}
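/*
 * Illustrative example (not from the original header): with 64-bit longs,
 * __NFDBITS is 64, so fd 70 lands in fds_bits[70/64] == fds_bits[1] at
 * bit 70%64 == 6; __FD_SET(70, &set) thus does set.fds_bits[1] |= 1UL<<6.
 */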
/*
 * This will unroll the loop for the normal constant case (16 longs,
 * for a 1024-bit fd_set with 64-bit longs)
*/
#undef __FD_ZERO
static __inline__ void __FD_ZERO(__kernel_fd_set *p)
{
unsigned long *tmp = p->fds_bits;
int i;
if (__builtin_constant_p(__FDSET_LONGS)) {
switch (__FDSET_LONGS) {
case 16:
tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
return;
case 8:
tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
return;
case 4:
tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
return;
}
}
i = __FDSET_LONGS;
while (i) {
i--;
*tmp = 0;
tmp++;
}
}
# endif /* !__GNUC__ */
# endif /* __KERNEL__ */
#endif /* _ASM_IA64_POSIX_TYPES_H */

View File

@@ -0,0 +1,698 @@
#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H
/*
* Copyright (C) 1998-2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
* Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
*
* 11/24/98 S.Eranian added ia64_set_iva()
* 12/03/99 D. Mosberger implement thread_saved_pc() via kernel unwind API
* 06/16/00 A. Mallick added csd/ssd/tssd for ia32 support
*/
#include <linux/config.h>
#include <asm/intrinsics.h>
#include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>
/* Our arch specific arch_init_sched_domain is in arch/ia64/kernel/domain.c */
#define ARCH_HAS_SCHED_DOMAIN
#define IA64_NUM_DBG_REGS 8
/*
* Limits for PMC and PMD are set to less than maximum architected values
* but should be sufficient for a while
*/
#define IA64_NUM_PMC_REGS 32
#define IA64_NUM_PMD_REGS 32
#define DEFAULT_MAP_BASE __IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE __IA64_UL_CONST(0xa000000000000000)
/*
 * TASK_SIZE is really a misnomer. It is actually the maximum user
* space address (plus one). On IA-64, there are five regions of 2TB
* each (assuming 8KB page size), for a total of 8TB of user virtual
* address space.
*/
#define TASK_SIZE (current->thread.task_size)
/*
* MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for
* address-space MM. Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE,
* because the kernel may have installed helper-mappings above TASK_SIZE. For example,
* for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
*/
#define MM_VM_SIZE(mm) DEFAULT_TASK_SIZE
/*
* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE (current->thread.map_base)
#define IA64_THREAD_FPH_VALID (__IA64_UL(1) << 0) /* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID (__IA64_UL(1) << 1) /* debug registers valid? */
#define IA64_THREAD_PM_VALID (__IA64_UL(1) << 2) /* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT (__IA64_UL(1) << 3) /* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS (__IA64_UL(1) << 4) /* generate SIGBUS on unaligned acc. */
/* bit 5 is currently unused */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6) /* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE (__IA64_UL(1) << 7) /* send a SIGFPE for fpswa faults */
#define IA64_THREAD_UAC_SHIFT 3
#define IA64_THREAD_UAC_MASK (IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT 6
#define IA64_THREAD_FPEMU_MASK (IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)
/*
* This shift should be large enough to be able to represent 1000000000/itc_freq with good
* accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
* (this will give enough slack to represent 10 seconds worth of time as a scaled number).
*/
#define IA64_NSEC_PER_CYC_SHIFT 30
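/*
 * Illustrative worked example (not from the original header): given
 * nsec_per_cyc == (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq (see
 * struct cpuinfo_ia64 below), ITC cycles convert to nanoseconds with a
 * single multiply and shift:
 *
 *	nsec = (cycles * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
 *
 * e.g. at itc_freq == 1 GHz, nsec_per_cyc == 1 << 30 and nsec == cycles.
 */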
#ifndef __ASSEMBLY__
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/types.h>
#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <asm/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif
/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
__u64 reserved0 : 1;
__u64 be : 1;
__u64 up : 1;
__u64 ac : 1;
__u64 mfl : 1;
__u64 mfh : 1;
__u64 reserved1 : 7;
__u64 ic : 1;
__u64 i : 1;
__u64 pk : 1;
__u64 reserved2 : 1;
__u64 dt : 1;
__u64 dfl : 1;
__u64 dfh : 1;
__u64 sp : 1;
__u64 pp : 1;
__u64 di : 1;
__u64 si : 1;
__u64 db : 1;
__u64 lp : 1;
__u64 tb : 1;
__u64 rt : 1;
__u64 reserved3 : 4;
__u64 cpl : 2;
__u64 is : 1;
__u64 mc : 1;
__u64 it : 1;
__u64 id : 1;
__u64 da : 1;
__u64 dd : 1;
__u64 ss : 1;
__u64 ri : 2;
__u64 ed : 1;
__u64 bn : 1;
__u64 reserved4 : 19;
};
/*
* CPU type, hardware bug flags, and per-CPU state. Frequently used
* state comes earlier:
*/
struct cpuinfo_ia64 {
__u32 softirq_pending;
__u64 itm_delta; /* # of clock cycles between clock ticks */
__u64 itm_next; /* interval timer mask value to use for next clock tick */
__u64 nsec_per_cyc; /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
__u64 unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */
__u64 unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */
__u64 *pgd_quick;
__u64 *pmd_quick;
__u64 pgtable_cache_sz;
__u64 itc_freq; /* frequency of ITC counter */
__u64 proc_freq; /* frequency of processor */
__u64 cyc_per_usec; /* itc_freq/1000000 */
__u64 ptce_base;
__u32 ptce_count[2];
__u32 ptce_stride[2];
struct task_struct *ksoftirqd; /* kernel softirq daemon for this CPU */
#ifdef CONFIG_SMP
__u64 loops_per_jiffy;
int cpu;
#endif
/* CPUID-derived information: */
__u64 ppn;
__u64 features;
__u8 number;
__u8 revision;
__u8 model;
__u8 family;
__u8 archrev;
char vendor[16];
#ifdef CONFIG_NUMA
struct ia64_node_data *node_data;
#endif
};
DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
/*
* The "local" data variable. It refers to the per-CPU data of the currently executing
* CPU, much like "current" points to the per-task data of the currently executing task.
* Do not use the address of local_cpu_data, since it will be different from
* cpu_data(smp_processor_id())!
*/
#define local_cpu_data (&__ia64_per_cpu_var(cpu_info))
#define cpu_data(cpu) (&per_cpu(cpu_info, cpu))
extern void identify_cpu (struct cpuinfo_ia64 *);
extern void print_cpu_info (struct cpuinfo_ia64 *);
typedef struct {
unsigned long seg;
} mm_segment_t;
#define SET_UNALIGN_CTL(task,value) \
({ \
(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK) \
| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK)); \
0; \
})
#define GET_UNALIGN_CTL(task,addr) \
({ \
put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT, \
(int __user *) (addr)); \
})
#define SET_FPEMU_CTL(task,value) \
({ \
(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK) \
| (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK)); \
0; \
})
#define GET_FPEMU_CTL(task,addr) \
({ \
put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT, \
(int __user *) (addr)); \
})
#ifdef CONFIG_IA32_SUPPORT
struct desc_struct {
unsigned int a, b;
};
#define desc_empty(desc) (!((desc)->a + (desc)->b))
#define desc_equal(desc1, desc2) (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
#define GDT_ENTRY_TLS_ENTRIES 3
#define GDT_ENTRY_TLS_MIN 6
#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
struct partial_page_list;
#endif
struct thread_struct {
__u32 flags; /* various thread flags (see IA64_THREAD_*) */
/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
__u8 on_ustack; /* executing on user-stacks? */
__u8 pad[3];
__u64 ksp; /* kernel stack pointer */
__u64 map_base; /* base address for get_unmapped_area() */
__u64 task_size; /* limit for task size */
__u64 rbs_bot; /* the base address for the RBS */
int last_fph_cpu; /* CPU that may hold the contents of f32-f127 */
#ifdef CONFIG_IA32_SUPPORT
__u64 eflag; /* IA32 EFLAGS reg */
__u64 fsr; /* IA32 floating pt status reg */
__u64 fcr; /* IA32 floating pt control reg */
__u64 fir; /* IA32 fp except. instr. reg */
__u64 fdr; /* IA32 fp except. data reg */
__u64 old_k1; /* old value of ar.k1 */
__u64 old_iob; /* old IOBase value */
struct partial_page_list *ppl; /* partial page list for 4K page size issue */
/* cached TLS descriptors. */
struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
# define INIT_THREAD_IA32 .eflag = 0, \
.fsr = 0, \
.fcr = 0x17800000037fULL, \
.fir = 0, \
.fdr = 0, \
.old_k1 = 0, \
.old_iob = 0, \
.ppl = NULL,
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
#ifdef CONFIG_PERFMON
__u64 pmcs[IA64_NUM_PMC_REGS];
__u64 pmds[IA64_NUM_PMD_REGS];
void *pfm_context; /* pointer to detailed PMU context */
unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM .pmcs = {0UL, }, \
.pmds = {0UL, }, \
.pfm_context = NULL, \
.pfm_needs_checking = 0UL,
#else
# define INIT_THREAD_PM
#endif
__u64 dbr[IA64_NUM_DBG_REGS];
__u64 ibr[IA64_NUM_DBG_REGS];
struct ia64_fpreg fph[96]; /* saved/loaded on demand */
};
#define INIT_THREAD { \
.flags = 0, \
.on_ustack = 0, \
.ksp = 0, \
.map_base = DEFAULT_MAP_BASE, \
.rbs_bot = STACK_TOP - DEFAULT_USER_STACK_SIZE, \
.task_size = DEFAULT_TASK_SIZE, \
.last_fph_cpu = -1, \
INIT_THREAD_IA32 \
INIT_THREAD_PM \
.dbr = {0, }, \
.ibr = {0, }, \
.fph = {{{{0}}}, } \
}
#define start_thread(regs,new_ip,new_sp) do { \
set_fs(USER_DS); \
regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL)) \
& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS)); \
regs->cr_iip = new_ip; \
regs->ar_rsc = 0xf; /* eager mode, privilege level 3 */ \
regs->ar_rnat = 0; \
regs->ar_bspstore = current->thread.rbs_bot; \
regs->ar_fpsr = FPSR_DEFAULT; \
regs->loadrs = 0; \
regs->r8 = current->mm->dumpable; /* set "don't zap registers" flag */ \
regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
if (unlikely(!current->mm->dumpable)) { \
/* \
* Zap scratch regs to avoid leaking bits between processes with different \
* uid/privileges. \
*/ \
regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0; \
regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0; \
} \
} while (0)
/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;
/*
* Free all resources held by a thread. This is called after the
* parent of DEAD_TASK has collected the exit status of the task via
* wait().
*/
#define release_thread(dead_task)
/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0)
/*
* This is the mechanism for creating a new kernel thread.
*
* NOTE 1: Only a kernel-only process (ie the swapper or direct
* descendants who haven't done an "execve()") should use this: it
* will work within a system call from a "real" process, but the
* process memory space will not be free'd until both the parent and
* the child have exited.
*
* NOTE 2: This MUST NOT be an inlined function. Otherwise, we get
* into trouble in init/main.c when the child thread returns to
* do_basic_setup() and the timing is such that free_initmem() has
* been called already.
*/
extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);
/* Get wait channel for task P. */
extern unsigned long get_wchan (struct task_struct *p);
/* Return instruction pointer of blocked task TSK. */
#define KSTK_EIP(tsk) \
({ \
struct pt_regs *_regs = ia64_task_regs(tsk); \
_regs->cr_iip + ia64_psr(_regs)->ri; \
})
/* Return stack pointer of blocked task TSK. */
#define KSTK_ESP(tsk) ((tsk)->thread.ksp)
extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);
#define ia64_get_kr(regnum) \
({ \
unsigned long r = 0; \
\
switch (regnum) { \
case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break; \
case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break; \
case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break; \
case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break; \
case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break; \
case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break; \
case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break; \
case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break; \
default: ia64_getreg_unknown_kr(); break; \
} \
r; \
})
#define ia64_set_kr(regnum, r) \
({ \
switch (regnum) { \
case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break; \
case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break; \
case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break; \
case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break; \
case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break; \
case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break; \
case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break; \
case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break; \
default: ia64_setreg_unknown_kr(); break; \
} \
})
/*
* The following three macros can't be inline functions because we don't have struct
* task_struct at this point.
*/
/* Return TRUE if task T owns the fph partition of the CPU we're running on. */
#define ia64_is_local_fpu_owner(t) \
({ \
struct task_struct *__ia64_islfo_task = (t); \
(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id() \
&& __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \
})
/* Mark task T as owning the fph partition of the CPU we're running on. */
#define ia64_set_local_fpu_owner(t) do { \
struct task_struct *__ia64_slfo_task = (t); \
__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id(); \
ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task); \
} while (0)
/* Mark the fph partition of task T as being invalid on all CPUs. */
#define ia64_drop_fpu(t) ((t)->thread.last_fph_cpu = -1)
extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);
#ifdef CONFIG_IA32_SUPPORT
extern void ia32_save_state (struct task_struct *task);
extern void ia32_load_state (struct task_struct *task);
#endif
#define ia64_fph_enable() do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable() do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
ia64_fph_enable();
__ia64_init_fpu();
ia64_fph_disable();
}
/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
ia64_fph_enable();
__ia64_save_fpu(fph);
ia64_fph_disable();
}
/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
ia64_fph_enable();
__ia64_load_fpu(fph);
ia64_fph_disable();
}
static inline __u64
ia64_clear_ic (void)
{
__u64 psr;
psr = ia64_getreg(_IA64_REG_PSR);
ia64_stop();
ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
ia64_srlz_i();
return psr;
}
/*
* Restore the psr.
*/
static inline void
ia64_set_psr (__u64 psr)
{
ia64_stop();
ia64_setreg(_IA64_REG_PSR_L, psr);
ia64_srlz_d();
}
/*
* Insert a translation into an instruction and/or data translation
* register.
*/
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
__u64 vmaddr, __u64 pte,
__u64 log_page_size)
{
ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
ia64_stop();
if (target_mask & 0x1)
ia64_itri(tr_num, pte);
if (target_mask & 0x2)
ia64_itrd(tr_num, pte);
}
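/*
 * Illustrative usage sketch (not from the original header; tr_num, vaddr,
 * pte and log_page_size are caller-supplied): a translation register is
 * normally inserted with PSR.ic turned off, using ia64_clear_ic() and
 * ia64_set_psr() above:
 *
 *	u64 psr = ia64_clear_ic();
 *	ia64_itr(0x3, tr_num, vaddr, pte, log_page_size);	// I- and D-TR
 *	ia64_set_psr(psr);
 *	ia64_srlz_i();
 */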
/*
* Insert a translation into the instruction and/or data translation
* cache.
*/
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
__u64 log_page_size)
{
ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
ia64_stop();
/* as per EAS2.6, itc must be the last instruction in an instruction group */
if (target_mask & 0x1)
ia64_itci(pte);
if (target_mask & 0x2)
ia64_itcd(pte);
}
/*
* Purge a range of addresses from instruction and/or data translation
* register(s).
*/
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
if (target_mask & 0x1)
ia64_ptri(vmaddr, (log_size << 2));
if (target_mask & 0x2)
ia64_ptrd(vmaddr, (log_size << 2));
}
/* Set the interrupt vector address. The address must be suitably aligned (32KB). */
static inline void
ia64_set_iva (void *ivt_addr)
{
ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
ia64_srlz_i();
}
/* Set the page table address and control bits. */
static inline void
ia64_set_pta (__u64 pta)
{
/* Note: srlz.i implies srlz.d */
ia64_setreg(_IA64_REG_CR_PTA, pta);
ia64_srlz_i();
}
static inline void
ia64_eoi (void)
{
ia64_setreg(_IA64_REG_CR_EOI, 0);
ia64_srlz_d();
}
#define cpu_relax() ia64_hint(ia64_hint_pause)
static inline void
ia64_set_lrr0 (unsigned long val)
{
ia64_setreg(_IA64_REG_CR_LRR0, val);
ia64_srlz_d();
}
static inline void
ia64_set_lrr1 (unsigned long val)
{
ia64_setreg(_IA64_REG_CR_LRR1, val);
ia64_srlz_d();
}
/*
* Given the address to which a spill occurred, return the unat bit
* number that corresponds to this address.
*/
static inline __u64
ia64_unat_pos (void *spill_addr)
{
return ((__u64) spill_addr >> 3) & 0x3f;
}
/*
* Set the NaT bit of an integer register which was spilled at address
* SPILL_ADDR. UNAT is the mask to be updated.
*/
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
__u64 bit = ia64_unat_pos(spill_addr);
__u64 mask = 1UL << bit;
*unat = (*unat & ~mask) | (nat << bit);
}
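/*
 * Illustrative worked example (not from the original header): a register
 * spilled at an address whose low bits are 0x138 has
 * ia64_unat_pos() == (0x138 >> 3) & 0x3f == 39, so ia64_set_unat()
 * updates bit 39 of *unat with the register's NaT value.
 */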
/*
* Return saved PC of a blocked thread.
* Note that the only way T can block is through a call to schedule() -> switch_to().
*/
static inline unsigned long
thread_saved_pc (struct task_struct *t)
{
struct unw_frame_info info;
unsigned long ip;
unw_init_from_blocked_task(&info, t);
if (unw_unwind(&info) < 0)
return 0;
unw_get_ip(&info, &ip);
return ip;
}
/*
* Get the current instruction/program counter value.
*/
#define current_text_addr() \
({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })
static inline __u64
ia64_get_ivr (void)
{
__u64 r;
ia64_srlz_d();
r = ia64_getreg(_IA64_REG_CR_IVR);
ia64_srlz_d();
return r;
}
static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
__ia64_set_dbr(regnum, value);
#ifdef CONFIG_ITANIUM
ia64_srlz_d();
#endif
}
static inline __u64
ia64_get_dbr (__u64 regnum)
{
__u64 retval;
retval = __ia64_get_dbr(regnum);
#ifdef CONFIG_ITANIUM
ia64_srlz_d();
#endif
return retval;
}
static inline __u64
ia64_rotr (__u64 w, __u64 n)
{
return (w >> n) | (w << (64 - n));
}
#define ia64_rotl(w,n) ia64_rotr((w), (64) - (n))
/*
* Take a mapped kernel address and return the equivalent address
* in the region 7 identity mapped virtual area.
*/
static inline void *
ia64_imva (void *addr)
{
void *result;
result = (void *) ia64_tpa(addr);
return __va(result);
}
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE L1_CACHE_BYTES
static inline void
prefetch (const void *x)
{
ia64_lfetch(ia64_lfhint_none, x);
}
static inline void
prefetchw (const void *x)
{
ia64_lfetch_excl(ia64_lfhint_none, x);
}
#define spin_lock_prefetch(x) prefetchw(x)
extern unsigned long boot_option_idle_override;
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_PROCESSOR_H */

View File

@@ -0,0 +1,337 @@
#ifndef _ASM_IA64_PTRACE_H
#define _ASM_IA64_PTRACE_H
/*
* Copyright (C) 1998-2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 2003 Intel Co
* Suresh Siddha <suresh.b.siddha@intel.com>
* Fenghua Yu <fenghua.yu@intel.com>
* Arun Sharma <arun.sharma@intel.com>
*
* 12/07/98 S. Eranian added pt_regs & switch_stack
* 12/21/98 D. Mosberger updated to match latest code
* 6/17/99 D. Mosberger added second unat member to "struct switch_stack"
*
*/
/*
* When a user process is blocked, its state looks as follows:
*
* +----------------------+ ------- IA64_STK_OFFSET
* | | ^
* | struct pt_regs | |
* | | |
* +----------------------+ |
* | | |
* | memory stack | |
* | (growing downwards) | |
* //.....................// |
* |
* //.....................// |
* | | |
* +----------------------+ |
* | struct switch_stack | |
* | | |
* +----------------------+ |
* | | |
* //.....................// |
* |
* //.....................// |
* | | |
* | register stack | |
* | (growing upwards) | |
* | | |
* +----------------------+ | --- IA64_RBS_OFFSET
* | struct thread_info | | ^
* +----------------------+ | |
* | | | |
* | struct task_struct | | |
* current -> | | | |
* +----------------------+ -------
*
* Note that ar.ec is not saved explicitly in pt_reg or switch_stack.
* This is because ar.ec is saved as part of ar.pfs.
*/
#include <linux/config.h>
#include <asm/fpu.h>
#include <asm/offsets.h>
/*
* Base-2 logarithm of number of pages to allocate per task structure
* (including register backing store and memory stack):
*/
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define KERNEL_STACK_SIZE_ORDER 3
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define KERNEL_STACK_SIZE_ORDER 2
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define KERNEL_STACK_SIZE_ORDER 1
#else
# define KERNEL_STACK_SIZE_ORDER 0
#endif
#define IA64_RBS_OFFSET ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 15) & ~15)
#define IA64_STK_OFFSET ((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)
#define KERNEL_STACK_SIZE IA64_STK_OFFSET
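/*
 * Illustrative worked example (not from the original header): with
 * CONFIG_IA64_PAGE_SIZE_16KB, KERNEL_STACK_SIZE_ORDER is 1, so each task
 * gets 2 pages == 32KB: task_struct and thread_info sit at the bottom,
 * the register backing store grows upward from IA64_RBS_OFFSET, and
 * struct pt_regs sits just below IA64_STK_OFFSET (== 32KB) at the top,
 * as shown in the diagram above.
 */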
#ifndef __ASSEMBLY__
#include <asm/current.h>
#include <asm/page.h>
/*
* This struct defines the way the registers are saved on system
* calls.
*
 * We don't save all floating-point registers because the kernel
 * is compiled to use only a very small subset, so the others are
 * left untouched.
*
 * THIS STRUCTURE MUST BE A MULTIPLE OF 16 BYTES IN SIZE
* (because the memory stack pointer MUST ALWAYS be aligned this way)
*
*/
struct pt_regs {
/* The following registers are saved by SAVE_MIN: */
unsigned long b6; /* scratch */
unsigned long b7; /* scratch */
unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
unsigned long ar_ssd; /* reserved for future use (scratch) */
unsigned long r8; /* scratch (return value register 0) */
unsigned long r9; /* scratch (return value register 1) */
unsigned long r10; /* scratch (return value register 2) */
unsigned long r11; /* scratch (return value register 3) */
unsigned long cr_ipsr; /* interrupted task's psr */
unsigned long cr_iip; /* interrupted task's instruction pointer */
/*
* interrupted task's function state; if bit 63 is cleared, it
* contains syscall's ar.pfs.pfm:
*/
unsigned long cr_ifs;
unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
unsigned long ar_pfs; /* prev function state */
unsigned long ar_rsc; /* RSE configuration */
/* The following two are valid only if cr_ipsr.cpl > 0: */
unsigned long ar_rnat; /* RSE NaT */
unsigned long ar_bspstore; /* RSE bspstore */
unsigned long pr; /* 64 predicate registers (1 bit each) */
unsigned long b0; /* return pointer (bp) */
unsigned long loadrs; /* size of dirty partition << 16 */
unsigned long r1; /* the gp pointer */
unsigned long r12; /* interrupted task's memory stack pointer */
unsigned long r13; /* thread pointer */
unsigned long ar_fpsr; /* floating point status (preserved) */
unsigned long r15; /* scratch */
/* The remaining registers are NOT saved for system calls. */
unsigned long r14; /* scratch */
unsigned long r2; /* scratch */
unsigned long r3; /* scratch */
/* The following registers are saved by SAVE_REST: */
unsigned long r16; /* scratch */
unsigned long r17; /* scratch */
unsigned long r18; /* scratch */
unsigned long r19; /* scratch */
unsigned long r20; /* scratch */
unsigned long r21; /* scratch */
unsigned long r22; /* scratch */
unsigned long r23; /* scratch */
unsigned long r24; /* scratch */
unsigned long r25; /* scratch */
unsigned long r26; /* scratch */
unsigned long r27; /* scratch */
unsigned long r28; /* scratch */
unsigned long r29; /* scratch */
unsigned long r30; /* scratch */
unsigned long r31; /* scratch */
unsigned long ar_ccv; /* compare/exchange value (scratch) */
/*
* Floating point registers that the kernel considers scratch:
*/
struct ia64_fpreg f6; /* scratch */
struct ia64_fpreg f7; /* scratch */
struct ia64_fpreg f8; /* scratch */
struct ia64_fpreg f9; /* scratch */
struct ia64_fpreg f10; /* scratch */
struct ia64_fpreg f11; /* scratch */
};
/*
 * This structure contains the additional registers that need to be
 * preserved across a context switch. It generally consists of the
 * "preserved" registers.
*/
struct switch_stack {
unsigned long caller_unat; /* user NaT collection register (preserved) */
unsigned long ar_fpsr; /* floating-point status register */
struct ia64_fpreg f2; /* preserved */
struct ia64_fpreg f3; /* preserved */
struct ia64_fpreg f4; /* preserved */
struct ia64_fpreg f5; /* preserved */
struct ia64_fpreg f12; /* scratch, but untouched by kernel */
struct ia64_fpreg f13; /* scratch, but untouched by kernel */
struct ia64_fpreg f14; /* scratch, but untouched by kernel */
struct ia64_fpreg f15; /* scratch, but untouched by kernel */
struct ia64_fpreg f16; /* preserved */
struct ia64_fpreg f17; /* preserved */
struct ia64_fpreg f18; /* preserved */
struct ia64_fpreg f19; /* preserved */
struct ia64_fpreg f20; /* preserved */
struct ia64_fpreg f21; /* preserved */
struct ia64_fpreg f22; /* preserved */
struct ia64_fpreg f23; /* preserved */
struct ia64_fpreg f24; /* preserved */
struct ia64_fpreg f25; /* preserved */
struct ia64_fpreg f26; /* preserved */
struct ia64_fpreg f27; /* preserved */
struct ia64_fpreg f28; /* preserved */
struct ia64_fpreg f29; /* preserved */
struct ia64_fpreg f30; /* preserved */
struct ia64_fpreg f31; /* preserved */
unsigned long r4; /* preserved */
unsigned long r5; /* preserved */
unsigned long r6; /* preserved */
unsigned long r7; /* preserved */
unsigned long b0; /* so we can force a direct return in copy_thread */
unsigned long b1;
unsigned long b2;
unsigned long b3;
unsigned long b4;
unsigned long b5;
unsigned long ar_pfs; /* previous function state */
unsigned long ar_lc; /* loop counter (preserved) */
unsigned long ar_unat; /* NaT bits for r4-r7 */
unsigned long ar_rnat; /* RSE NaT collection register */
unsigned long ar_bspstore; /* RSE dirty base (preserved) */
unsigned long pr; /* 64 predicate registers (1 bit each) */
};
#ifdef __KERNEL__
/*
* We use the ia64_psr(regs)->ri to determine which of the three
 * instructions in the bundle (16 bytes) took the sample. Generate
 * the canonical representation by adding the slot number to the
 * instruction pointer.
*/
# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
/* Conserve space in histogram by encoding slot bits in address
* bits 2 and 3 rather than bits 0 and 1.
*/
#define profile_pc(regs) \
({ \
unsigned long __ip = instruction_pointer(regs); \
(__ip & ~3UL) + ((__ip & 3UL) << 2); \
})
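/*
 * Illustrative worked example (not from the original header): for an
 * interruption in slot 2 of the bundle at 0x4000, instruction_pointer()
 * is 0x4002 and profile_pc() re-encodes the slot into bits 2-3, giving
 * 0x4000 + (2 << 2) == 0x4008.
 */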
/* given a pointer to a task_struct, return the user's pt_regs */
# define ia64_task_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
# define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
# define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
# define fsys_mode(task,regs) \
({ \
struct task_struct *_task = (task); \
struct pt_regs *_regs = (regs); \
!user_mode(_regs) && user_stack(_task, _regs); \
})
/*
* System call handlers that, upon successful completion, need to return a negative value
* should call force_successful_syscall_return() right before returning. On architectures
* where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
* ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
* flag will not get set. On architectures which do not support a separate error flag,
* the macro is a no-op and the spurious error condition needs to be filtered out by some
* other means (e.g., in user-level, by passing an extra argument to the syscall handler,
* or something along those lines).
*
* On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
*/
# define force_successful_syscall_return() (ia64_task_regs(current)->r8 = 0)
struct task_struct; /* forward decl */
struct unw_frame_info; /* forward decl */
extern void show_regs (struct pt_regs *);
extern void ia64_do_show_stack (struct unw_frame_info *, void *);
extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
unsigned long *);
extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
unsigned long, long *);
extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
unsigned long, long);
extern void ia64_flush_fph (struct task_struct *);
extern void ia64_sync_fph (struct task_struct *);
extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
unsigned long, unsigned long);
/* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
/* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);
extern void ia64_increment_ip (struct pt_regs *pt);
extern void ia64_decrement_ip (struct pt_regs *pt);
#endif /* !__KERNEL__ */
/* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */
struct pt_all_user_regs {
unsigned long nat;
unsigned long cr_iip;
unsigned long cfm;
unsigned long cr_ipsr;
unsigned long pr;
unsigned long gr[32];
unsigned long br[8];
unsigned long ar[128];
struct ia64_fpreg fr[128];
};
#endif /* !__ASSEMBLY__ */
/* indices to application-registers array in pt_all_user_regs */
#define PT_AUR_RSC 16
#define PT_AUR_BSP 17
#define PT_AUR_BSPSTORE 18
#define PT_AUR_RNAT 19
#define PT_AUR_CCV 32
#define PT_AUR_UNAT 36
#define PT_AUR_FPSR 40
#define PT_AUR_PFS 64
#define PT_AUR_LC 65
#define PT_AUR_EC 66
/*
* The numbers chosen here are somewhat arbitrary but absolutely MUST
 * not overlap with any of the numbers assigned in <linux/ptrace.h>.
*/
#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
#define PTRACE_OLD_GETSIGINFO 13 /* (replaced by PTRACE_GETSIGINFO in <linux/ptrace.h>) */
#define PTRACE_OLD_SETSIGINFO 14 /* (replaced by PTRACE_SETSIGINFO in <linux/ptrace.h>) */
#define PTRACE_GETREGS 18 /* get all registers (pt_all_user_regs) in one shot */
#define PTRACE_SETREGS 19 /* set all registers (pt_all_user_regs) in one shot */
#define PTRACE_OLDSETOPTIONS 21
#endif /* _ASM_IA64_PTRACE_H */

View File

@@ -0,0 +1,268 @@
#ifndef _ASM_IA64_PTRACE_OFFSETS_H
#define _ASM_IA64_PTRACE_OFFSETS_H
/*
* Copyright (C) 1999, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
/*
* The "uarea" that can be accessed via PEEKUSER and POKEUSER is a
* virtual structure that would have the following definition:
*
* struct uarea {
* struct ia64_fpreg fph[96]; // f32-f127
* unsigned long nat_bits;
* unsigned long empty1;
* struct ia64_fpreg f2; // f2-f5
* :
* struct ia64_fpreg f5;
* struct ia64_fpreg f10; // f10-f31
* :
* struct ia64_fpreg f31;
* unsigned long r4; // r4-r7
* :
* unsigned long r7;
* unsigned long b1; // b1-b5
* :
* unsigned long b5;
* unsigned long ar_ec;
* unsigned long ar_lc;
* unsigned long empty2[5];
* unsigned long cr_ipsr;
* unsigned long cr_iip;
* unsigned long cfm;
* unsigned long ar_unat;
* unsigned long ar_pfs;
* unsigned long ar_rsc;
* unsigned long ar_rnat;
* unsigned long ar_bspstore;
* unsigned long pr;
* unsigned long b6;
* unsigned long ar_bsp;
* unsigned long r1;
* unsigned long r2;
* unsigned long r3;
* unsigned long r12;
* unsigned long r13;
* unsigned long r14;
* unsigned long r15;
* unsigned long r8;
* unsigned long r9;
* unsigned long r10;
* unsigned long r11;
* unsigned long r16;
* :
* unsigned long r31;
* unsigned long ar_ccv;
* unsigned long ar_fpsr;
* unsigned long b0;
* unsigned long b7;
* unsigned long f6;
* unsigned long f7;
* unsigned long f8;
* unsigned long f9;
* unsigned long ar_csd;
* unsigned long ar_ssd;
* unsigned long rsvd1[710];
* unsigned long dbr[8];
* unsigned long rsvd2[504];
* unsigned long ibr[8];
* unsigned long rsvd3[504];
* unsigned long pmd[4];
* }
*/
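/*
 * Illustrative usage sketch (not from the original header): a debugger
 * reads one of these "uarea" slots by passing a PT_* offset below as the
 * address argument of PTRACE_PEEKUSER, e.g.:
 *
 *	errno = 0;
 *	long ip = ptrace(PTRACE_PEEKUSER, pid, PT_CR_IIP, 0);
 *	if (ip == -1 && errno != 0)
 *		perror("PTRACE_PEEKUSER");
 */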
/* fph: */
#define PT_F32 0x0000
#define PT_F33 0x0010
#define PT_F34 0x0020
#define PT_F35 0x0030
#define PT_F36 0x0040
#define PT_F37 0x0050
#define PT_F38 0x0060
#define PT_F39 0x0070
#define PT_F40 0x0080
#define PT_F41 0x0090
#define PT_F42 0x00a0
#define PT_F43 0x00b0
#define PT_F44 0x00c0
#define PT_F45 0x00d0
#define PT_F46 0x00e0
#define PT_F47 0x00f0
#define PT_F48 0x0100
#define PT_F49 0x0110
#define PT_F50 0x0120
#define PT_F51 0x0130
#define PT_F52 0x0140
#define PT_F53 0x0150
#define PT_F54 0x0160
#define PT_F55 0x0170
#define PT_F56 0x0180
#define PT_F57 0x0190
#define PT_F58 0x01a0
#define PT_F59 0x01b0
#define PT_F60 0x01c0
#define PT_F61 0x01d0
#define PT_F62 0x01e0
#define PT_F63 0x01f0
#define PT_F64 0x0200
#define PT_F65 0x0210
#define PT_F66 0x0220
#define PT_F67 0x0230
#define PT_F68 0x0240
#define PT_F69 0x0250
#define PT_F70 0x0260
#define PT_F71 0x0270
#define PT_F72 0x0280
#define PT_F73 0x0290
#define PT_F74 0x02a0
#define PT_F75 0x02b0
#define PT_F76 0x02c0
#define PT_F77 0x02d0
#define PT_F78 0x02e0
#define PT_F79 0x02f0
#define PT_F80 0x0300
#define PT_F81 0x0310
#define PT_F82 0x0320
#define PT_F83 0x0330
#define PT_F84 0x0340
#define PT_F85 0x0350
#define PT_F86 0x0360
#define PT_F87 0x0370
#define PT_F88 0x0380
#define PT_F89 0x0390
#define PT_F90 0x03a0
#define PT_F91 0x03b0
#define PT_F92 0x03c0
#define PT_F93 0x03d0
#define PT_F94 0x03e0
#define PT_F95 0x03f0
#define PT_F96 0x0400
#define PT_F97 0x0410
#define PT_F98 0x0420
#define PT_F99 0x0430
#define PT_F100 0x0440
#define PT_F101 0x0450
#define PT_F102 0x0460
#define PT_F103 0x0470
#define PT_F104 0x0480
#define PT_F105 0x0490
#define PT_F106 0x04a0
#define PT_F107 0x04b0
#define PT_F108 0x04c0
#define PT_F109 0x04d0
#define PT_F110 0x04e0
#define PT_F111 0x04f0
#define PT_F112 0x0500
#define PT_F113 0x0510
#define PT_F114 0x0520
#define PT_F115 0x0530
#define PT_F116 0x0540
#define PT_F117 0x0550
#define PT_F118 0x0560
#define PT_F119 0x0570
#define PT_F120 0x0580
#define PT_F121 0x0590
#define PT_F122 0x05a0
#define PT_F123 0x05b0
#define PT_F124 0x05c0
#define PT_F125 0x05d0
#define PT_F126 0x05e0
#define PT_F127 0x05f0
#define PT_NAT_BITS 0x0600
#define PT_F2 0x0610
#define PT_F3 0x0620
#define PT_F4 0x0630
#define PT_F5 0x0640
#define PT_F10 0x0650
#define PT_F11 0x0660
#define PT_F12 0x0670
#define PT_F13 0x0680
#define PT_F14 0x0690
#define PT_F15 0x06a0
#define PT_F16 0x06b0
#define PT_F17 0x06c0
#define PT_F18 0x06d0
#define PT_F19 0x06e0
#define PT_F20 0x06f0
#define PT_F21 0x0700
#define PT_F22 0x0710
#define PT_F23 0x0720
#define PT_F24 0x0730
#define PT_F25 0x0740
#define PT_F26 0x0750
#define PT_F27 0x0760
#define PT_F28 0x0770
#define PT_F29 0x0780
#define PT_F30 0x0790
#define PT_F31 0x07a0
#define PT_R4 0x07b0
#define PT_R5 0x07b8
#define PT_R6 0x07c0
#define PT_R7 0x07c8
#define PT_B1 0x07d8
#define PT_B2 0x07e0
#define PT_B3 0x07e8
#define PT_B4 0x07f0
#define PT_B5 0x07f8
#define PT_AR_EC 0x0800
#define PT_AR_LC 0x0808
#define PT_CR_IPSR 0x0830
#define PT_CR_IIP 0x0838
#define PT_CFM 0x0840
#define PT_AR_UNAT 0x0848
#define PT_AR_PFS 0x0850
#define PT_AR_RSC 0x0858
#define PT_AR_RNAT 0x0860
#define PT_AR_BSPSTORE 0x0868
#define PT_PR 0x0870
#define PT_B6 0x0878
#define PT_AR_BSP 0x0880 /* note: this points to the *end* of the backing store! */
#define PT_R1 0x0888
#define PT_R2 0x0890
#define PT_R3 0x0898
#define PT_R12 0x08a0
#define PT_R13 0x08a8
#define PT_R14 0x08b0
#define PT_R15 0x08b8
#define PT_R8 0x08c0
#define PT_R9 0x08c8
#define PT_R10 0x08d0
#define PT_R11 0x08d8
#define PT_R16 0x08e0
#define PT_R17 0x08e8
#define PT_R18 0x08f0
#define PT_R19 0x08f8
#define PT_R20 0x0900
#define PT_R21 0x0908
#define PT_R22 0x0910
#define PT_R23 0x0918
#define PT_R24 0x0920
#define PT_R25 0x0928
#define PT_R26 0x0930
#define PT_R27 0x0938
#define PT_R28 0x0940
#define PT_R29 0x0948
#define PT_R30 0x0950
#define PT_R31 0x0958
#define PT_AR_CCV 0x0960
#define PT_AR_FPSR 0x0968
#define PT_B0 0x0970
#define PT_B7 0x0978
#define PT_F6 0x0980
#define PT_F7 0x0990
#define PT_F8 0x09a0
#define PT_F9 0x09b0
#define PT_AR_CSD 0x09c0
#define PT_AR_SSD 0x09c8
#define PT_DBR 0x2000 /* data breakpoint registers */
#define PT_IBR 0x3000 /* instruction breakpoint registers */
#define PT_PMD 0x4000 /* performance monitoring counters */
#endif /* _ASM_IA64_PTRACE_OFFSETS_H */

View File

@@ -0,0 +1,58 @@
#ifndef _ASM_IA64_RESOURCE_H
#define _ASM_IA64_RESOURCE_H
/*
* Resource limits
*
* Based on <asm-i386/resource.h>.
*
* Modified 1998, 1999
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
#include <asm/ustack.h>
#define RLIMIT_CPU 0 /* CPU time in ms */
#define RLIMIT_FSIZE 1 /* Maximum filesize */
#define RLIMIT_DATA 2 /* max data size */
#define RLIMIT_STACK 3 /* max stack size */
#define RLIMIT_CORE 4 /* max core file size */
#define RLIMIT_RSS 5 /* max resident set size */
#define RLIMIT_NPROC 6 /* max number of processes */
#define RLIMIT_NOFILE 7 /* max number of open files */
#define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
#define RLIMIT_AS 9 /* address space limit */
#define RLIMIT_LOCKS 10 /* maximum file locks held */
#define RLIMIT_SIGPENDING 11 /* max number of pending signals */
#define RLIMIT_MSGQUEUE 12 /* maximum bytes in POSIX mqueues */
#define RLIM_NLIMITS 13
/*
* SuS says limits have to be unsigned.
* Which makes a ton more sense anyway.
*/
#define RLIM_INFINITY (~0UL)
# ifdef __KERNEL__
#define INIT_RLIMITS \
{ \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ _STK_LIM, DEFAULT_USER_STACK_SIZE }, \
{ 0, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
{ MLOCK_LIMIT, MLOCK_LIMIT }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
{ MQ_BYTES_MAX, MQ_BYTES_MAX }, \
}
# endif /* __KERNEL__ */
#endif /* _ASM_IA64_RESOURCE_H */

View File

@@ -0,0 +1,66 @@
#ifndef _ASM_IA64_RSE_H
#define _ASM_IA64_RSE_H
/*
* Copyright (C) 1998, 1999 Hewlett-Packard Co
* Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
*
* Register stack engine related helper functions. This file may be
* used in applications, so be careful about the name-space and give
* some consideration to non-GNU C compilers (though __inline__ is
* fine).
*/
static __inline__ unsigned long
ia64_rse_slot_num (unsigned long *addr)
{
return (((unsigned long) addr) >> 3) & 0x3f;
}
/*
* Return TRUE if ADDR is the address of an RNAT slot.
*/
static __inline__ unsigned long
ia64_rse_is_rnat_slot (unsigned long *addr)
{
return ia64_rse_slot_num(addr) == 0x3f;
}
/*
* Returns the address of the RNAT slot that covers the slot at
* address SLOT_ADDR.
*/
static __inline__ unsigned long *
ia64_rse_rnat_addr (unsigned long *slot_addr)
{
return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3));
}
/*
* Calculate the number of registers in the dirty partition starting at BSPSTORE and
* ending at BSP. This isn't simply (BSP-BSPSTORE)/8 because every 64th slot stores
* ar.rnat.
*/
static __inline__ unsigned long
ia64_rse_num_regs (unsigned long *bspstore, unsigned long *bsp)
{
unsigned long slots = (bsp - bspstore);
return slots - (ia64_rse_slot_num(bspstore) + slots)/0x40;
}
/*
* The inverse of the above: given bspstore and the number of
* registers, calculate ar.bsp.
*/
static __inline__ unsigned long *
ia64_rse_skip_regs (unsigned long *addr, long num_regs)
{
long delta = ia64_rse_slot_num(addr) + num_regs;
if (num_regs < 0)
delta -= 0x3e;
return addr + num_regs + delta/0x3f;
}
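/*
 * Illustrative worked example (not from the original header): if bspstore
 * points at slot 0x3d and bsp is 5 slots higher, one of those slots
 * (slot 0x3f) holds ar.rnat, so ia64_rse_num_regs() returns
 * 5 - (0x3d + 5)/0x40 == 4 registers; conversely,
 * ia64_rse_skip_regs(bspstore, 4) == bspstore + 4 + (0x3d + 4)/0x3f
 * == bspstore + 5, i.e. back to bsp.
 */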
#endif /* _ASM_IA64_RSE_H */

View File

@@ -0,0 +1,188 @@
/*
* asm-ia64/rwsem.h: R/W semaphores for ia64
*
* Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
* Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
*
* Based on asm-i386/rwsem.h and other architecture implementation.
*
* The MSW of the count is the negated number of active writers and
* waiting lockers, and the LSW is the total number of active locks.
*
* The lock count is initialized to 0 (no active and no waiting lockers).
*
* When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case
* of an uncontended lock. Readers increment by 1 and see a positive value
 * when uncontended, negative if there are writers (and possibly readers)
 * waiting (in which case the reader goes to sleep).
*/
#ifndef _ASM_IA64_RWSEM_H
#define _ASM_IA64_RWSEM_H
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/intrinsics.h>
/*
* the semaphore definition
*/
struct rw_semaphore {
signed int count;
spinlock_t wait_lock;
struct list_head wait_list;
#if RWSEM_DEBUG
int debug;
#endif
};
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
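/*
 * Illustrative worked examples (not from the original header): an
 * uncontended down_read() takes the count from 0 to 0x00000001; an
 * uncontended down_write() adds RWSEM_ACTIVE_WRITE_BIAS, giving
 * 0xffff0001 (i.e. -0xffff); a reader arriving while that writer holds
 * the lock sees a negative count and takes the rwsem_down_read_failed()
 * slow path.
 */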
/*
* initialization
*/
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT , 0
#else
#define __RWSEM_DEBUG_INIT /* */
#endif
#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
LIST_HEAD_INIT((name).wait_list) \
__RWSEM_DEBUG_INIT }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
static inline void
init_rwsem (struct rw_semaphore *sem)
{
sem->count = RWSEM_UNLOCKED_VALUE;
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
sem->debug = 0;
#endif
}
/*
* lock for reading
*/
static inline void
__down_read (struct rw_semaphore *sem)
{
int result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1);
if (result < 0)
rwsem_down_read_failed(sem);
}
/*
* lock for writing
*/
static inline void
__down_write (struct rw_semaphore *sem)
{
int old, new;
do {
old = sem->count;
new = old + RWSEM_ACTIVE_WRITE_BIAS;
} while (cmpxchg_acq(&sem->count, old, new) != old);
if (old != 0)
rwsem_down_write_failed(sem);
}
/*
* unlock after reading
*/
static inline void
__up_read (struct rw_semaphore *sem)
{
int result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1);
if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem);
}
/*
* unlock after writing
*/
static inline void
__up_write (struct rw_semaphore *sem)
{
int old, new;
do {
old = sem->count;
new = old - RWSEM_ACTIVE_WRITE_BIAS;
} while (cmpxchg_rel(&sem->count, old, new) != old);
if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem);
}
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
static inline int
__down_read_trylock (struct rw_semaphore *sem)
{
int tmp;
while ((tmp = sem->count) >= 0) {
if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
return 1;
}
}
return 0;
}
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
static inline int
__down_write_trylock (struct rw_semaphore *sem)
{
int tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
return tmp == RWSEM_UNLOCKED_VALUE;
}
/*
* downgrade write lock to read lock
*/
static inline void
__downgrade_write (struct rw_semaphore *sem)
{
int old, new;
do {
old = sem->count;
new = old - RWSEM_WAITING_BIAS;
} while (cmpxchg_rel(&sem->count, old, new) != old);
if (old < 0)
rwsem_downgrade_wake(sem);
}
/*
* Implement atomic add functionality. These used to be "inline" functions, but GCC v3.1
* doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
*/
#define rwsem_atomic_add(delta, sem) atomic_add(delta, (atomic_t *)(&(sem)->count))
#define rwsem_atomic_update(delta, sem) atomic_add_return(delta, (atomic_t *)(&(sem)->count))
#endif /* _ASM_IA64_RWSEM_H */

View File

@@ -0,0 +1,834 @@
#ifndef _ASM_IA64_SAL_H
#define _ASM_IA64_SAL_H
/*
* System Abstraction Layer definitions.
*
* This is based on version 2.5 of the manual "IA-64 System
* Abstraction Layer".
*
* Copyright (C) 2001 Intel
* Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
* Copyright (C) 2001 Fred Lewis <frederick.v.lewis@intel.com>
* Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
*
* 02/01/04 J. Hall Updated Error Record Structures to conform to July 2001
* revision of the SAL spec.
* 01/01/03 fvlewis Updated Error Record Structures to conform with Nov. 2000
* revision of the SAL spec.
* 99/09/29 davidm Updated for SAL 2.6.
* 00/03/29 cfleck Updated SAL Error Logging info for processor (SAL 2.6)
* (plus examples of platform error info structures from smariset @ Intel)
*/
#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT 0
#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT 1
#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT 2
#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT 3
#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK (1<<IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT)
#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT)
#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT)
#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT (1<<IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT)
#ifndef __ASSEMBLY__
#include <linux/bcd.h>
#include <linux/spinlock.h>
#include <linux/efi.h>
#include <asm/pal.h>
#include <asm/system.h>
#include <asm/fpu.h>
extern spinlock_t sal_lock;
/* SAL spec _requires_ eight args for each call. */
#define __SAL_CALL(result,a0,a1,a2,a3,a4,a5,a6,a7) \
result = (*ia64_sal)(a0,a1,a2,a3,a4,a5,a6,a7)
# define SAL_CALL(result,args...) do { \
unsigned long __ia64_sc_flags; \
struct ia64_fpreg __ia64_sc_fr[6]; \
ia64_save_scratch_fpregs(__ia64_sc_fr); \
spin_lock_irqsave(&sal_lock, __ia64_sc_flags); \
__SAL_CALL(result, args); \
spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags); \
ia64_load_scratch_fpregs(__ia64_sc_fr); \
} while (0)
# define SAL_CALL_NOLOCK(result,args...) do { \
unsigned long __ia64_scn_flags; \
struct ia64_fpreg __ia64_scn_fr[6]; \
ia64_save_scratch_fpregs(__ia64_scn_fr); \
local_irq_save(__ia64_scn_flags); \
__SAL_CALL(result, args); \
local_irq_restore(__ia64_scn_flags); \
ia64_load_scratch_fpregs(__ia64_scn_fr); \
} while (0)
# define SAL_CALL_REENTRANT(result,args...) do { \
struct ia64_fpreg __ia64_scs_fr[6]; \
ia64_save_scratch_fpregs(__ia64_scs_fr); \
preempt_disable(); \
__SAL_CALL(result, args); \
preempt_enable(); \
ia64_load_scratch_fpregs(__ia64_scs_fr); \
} while (0)
#define SAL_SET_VECTORS 0x01000000
#define SAL_GET_STATE_INFO 0x01000001
#define SAL_GET_STATE_INFO_SIZE 0x01000002
#define SAL_CLEAR_STATE_INFO 0x01000003
#define SAL_MC_RENDEZ 0x01000004
#define SAL_MC_SET_PARAMS 0x01000005
#define SAL_REGISTER_PHYSICAL_ADDR 0x01000006
#define SAL_CACHE_FLUSH 0x01000008
#define SAL_CACHE_INIT 0x01000009
#define SAL_PCI_CONFIG_READ 0x01000010
#define SAL_PCI_CONFIG_WRITE 0x01000011
#define SAL_FREQ_BASE 0x01000012
#define SAL_UPDATE_PAL 0x01000020
struct ia64_sal_retval {
/*
* A zero status value indicates call completed without error.
* A negative status value indicates reason of call failure.
* A positive status value indicates success but an
* informational value should be printed (e.g., "reboot for
* change to take effect").
*/
s64 status;
u64 v0;
u64 v1;
u64 v2;
};
typedef struct ia64_sal_retval (*ia64_sal_handler) (u64, ...);
enum {
SAL_FREQ_BASE_PLATFORM = 0,
SAL_FREQ_BASE_INTERVAL_TIMER = 1,
SAL_FREQ_BASE_REALTIME_CLOCK = 2
};
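/*
 * Illustrative usage sketch (not from the original header), showing how
 * SAL_CALL() and struct ia64_sal_retval above combine for a SAL_FREQ_BASE
 * query:
 *
 *	struct ia64_sal_retval isrv;
 *	SAL_CALL(isrv, SAL_FREQ_BASE, SAL_FREQ_BASE_PLATFORM, 0, 0, 0, 0, 0, 0);
 *	if (isrv.status == 0)
 *		printk("platform base freq: %lu ticks/sec\n", isrv.v0);
 */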
/*
* The SAL system table is followed by a variable number of variable
* length descriptors. The structure of these descriptors follows
* below.
* The defininition follows SAL specs from July 2000
*/
struct ia64_sal_systab {
u8 signature[4]; /* should be "SST_" */
u32 size; /* size of this table in bytes */
u8 sal_rev_minor;
u8 sal_rev_major;
u16 entry_count; /* # of entries in variable portion */
u8 checksum;
u8 reserved1[7];
u8 sal_a_rev_minor;
u8 sal_a_rev_major;
u8 sal_b_rev_minor;
u8 sal_b_rev_major;
/* oem_id & product_id: terminating NUL is missing if string is exactly 32 bytes long. */
u8 oem_id[32];
u8 product_id[32]; /* ASCII product id */
u8 reserved2[8];
};
enum sal_systab_entry_type {
SAL_DESC_ENTRY_POINT = 0,
SAL_DESC_MEMORY = 1,
SAL_DESC_PLATFORM_FEATURE = 2,
SAL_DESC_TR = 3,
SAL_DESC_PTC = 4,
SAL_DESC_AP_WAKEUP = 5
};
/*
* Entry type: Size:
* 0 48
* 1 32
* 2 16
* 3 32
* 4 16
* 5 16
*/
#define SAL_DESC_SIZE(type) "\060\040\020\040\020\020"[(unsigned) type]
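/*
 * Illustrative worked example (not from the original header): the string
 * above encodes the size table as octal character constants (\060 == 48,
 * \040 == 32, \020 == 16), so SAL_DESC_SIZE(SAL_DESC_MEMORY) indexes
 * character 1 and yields 32, matching the table.
 */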
typedef struct ia64_sal_desc_entry_point {
u8 type;
u8 reserved1[7];
u64 pal_proc;
u64 sal_proc;
u64 gp;
u8 reserved2[16];
}ia64_sal_desc_entry_point_t;
typedef struct ia64_sal_desc_memory {
u8 type;
u8 used_by_sal; /* needs to be mapped for SAL? */
u8 mem_attr; /* current memory attribute setting */
u8 access_rights; /* access rights set up by SAL */
u8 mem_attr_mask; /* mask of supported memory attributes */
u8 reserved1;
u8 mem_type; /* memory type */
u8 mem_usage; /* memory usage */
u64 addr; /* physical address of memory */
u32 length; /* length (multiple of 4KB pages) */
u32 reserved2;
u8 oem_reserved[8];
} ia64_sal_desc_memory_t;
typedef struct ia64_sal_desc_platform_feature {
u8 type;
u8 feature_mask;
u8 reserved1[14];
} ia64_sal_desc_platform_feature_t;
typedef struct ia64_sal_desc_tr {
u8 type;
u8 tr_type; /* 0 == instruction, 1 == data */
u8 regnum; /* translation register number */
u8 reserved1[5];
u64 addr; /* virtual address of area covered */
u64 page_size; /* encoded page size */
u8 reserved2[8];
} ia64_sal_desc_tr_t;
typedef struct ia64_sal_desc_ptc {
u8 type;
u8 reserved1[3];
u32 num_domains; /* # of coherence domains */
u64 domain_info; /* physical address of domain info table */
} ia64_sal_desc_ptc_t;
typedef struct ia64_sal_ptc_domain_info {
u64 proc_count; /* number of processors in domain */
u64 proc_list; /* physical address of LID array */
} ia64_sal_ptc_domain_info_t;
typedef struct ia64_sal_ptc_domain_proc_entry {
u64 id : 8; /* id of processor */
u64 eid : 8; /* eid of processor */
} ia64_sal_ptc_domain_proc_entry_t;
#define IA64_SAL_AP_EXTERNAL_INT 0
typedef struct ia64_sal_desc_ap_wakeup {
u8 type;
u8 mechanism; /* 0 == external interrupt */
u8 reserved1[6];
u64 vector; /* interrupt vector in range 0x10-0xff */
} ia64_sal_desc_ap_wakeup_t;
extern ia64_sal_handler ia64_sal;
extern struct ia64_sal_desc_ptc *ia64_ptc_domain_info;
extern unsigned short sal_revision; /* supported SAL spec revision */
extern unsigned short sal_version; /* SAL version; OEM dependent */
#define SAL_VERSION_CODE(major, minor) ((BIN2BCD(major) << 8) | BIN2BCD(minor))
extern const char *ia64_sal_strerror (long status);
extern void ia64_sal_init (struct ia64_sal_systab *sal_systab);
/* SAL information type encodings */
enum {
SAL_INFO_TYPE_MCA = 0, /* Machine check abort information */
SAL_INFO_TYPE_INIT = 1, /* Init information */
SAL_INFO_TYPE_CMC = 2, /* Corrected machine check information */
SAL_INFO_TYPE_CPE = 3 /* Corrected platform error information */
};
/* Encodings for machine check parameter types */
enum {
SAL_MC_PARAM_RENDEZ_INT = 1, /* Rendezvous interrupt */
SAL_MC_PARAM_RENDEZ_WAKEUP = 2, /* Wakeup */
SAL_MC_PARAM_CPE_INT = 3 /* Corrected Platform Error Int */
};
/* Encodings for rendezvous mechanisms */
enum {
SAL_MC_PARAM_MECHANISM_INT = 1, /* Use interrupt */
SAL_MC_PARAM_MECHANISM_MEM = 2 /* Use memory synchronization variable*/
};
/* Encodings for vectors which can be registered by the OS with SAL */
enum {
SAL_VECTOR_OS_MCA = 0,
SAL_VECTOR_OS_INIT = 1,
SAL_VECTOR_OS_BOOT_RENDEZ = 2
};
/* Encodings for mca_opt parameter sent to SAL_MC_SET_PARAMS */
#define SAL_MC_PARAM_RZ_ALWAYS 0x1
#define SAL_MC_PARAM_BINIT_ESCALATE 0x10
/*
* Definition of the SAL Error Log from the SAL spec
*/
/* SAL Error Record Section GUID Definitions */
#define SAL_PROC_DEV_ERR_SECT_GUID \
EFI_GUID(0xe429faf1, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SAL_PLAT_MEM_DEV_ERR_SECT_GUID \
EFI_GUID(0xe429faf2, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SAL_PLAT_SEL_DEV_ERR_SECT_GUID \
EFI_GUID(0xe429faf3, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SAL_PLAT_PCI_BUS_ERR_SECT_GUID \
EFI_GUID(0xe429faf4, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID \
EFI_GUID(0xe429faf5, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SAL_PLAT_PCI_COMP_ERR_SECT_GUID \
EFI_GUID(0xe429faf6, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SAL_PLAT_SPECIFIC_ERR_SECT_GUID \
EFI_GUID(0xe429faf7, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SAL_PLAT_HOST_CTLR_ERR_SECT_GUID \
EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SAL_PLAT_BUS_ERR_SECT_GUID \
EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define MAX_CACHE_ERRORS 6
#define MAX_TLB_ERRORS 6
#define MAX_BUS_ERRORS 1
/* Definition of version according to SAL spec for logging purposes */
typedef struct sal_log_revision {
u8 minor; /* BCD (0..99) */
u8 major; /* BCD (0..99) */
} sal_log_revision_t;
/* Definition of timestamp according to SAL spec for logging purposes */
typedef struct sal_log_timestamp {
u8 slh_second; /* Second (0..59) */
u8 slh_minute; /* Minute (0..59) */
u8 slh_hour; /* Hour (0..23) */
u8 slh_reserved;
u8 slh_day; /* Day (1..31) */
u8 slh_month; /* Month (1..12) */
u8 slh_year; /* Year (00..99) */
u8 slh_century; /* Century (19, 20, 21, ...) */
} sal_log_timestamp_t;
/* Definition of log record header structures */
typedef struct sal_log_record_header {
u64 id; /* Unique monotonically increasing ID */
sal_log_revision_t revision; /* Major and Minor revision of header */
u16 severity; /* Error Severity */
u32 len; /* Length of this error log in bytes */
sal_log_timestamp_t timestamp; /* Timestamp */
efi_guid_t platform_guid; /* Unique OEM Platform ID */
} sal_log_record_header_t;
/* Definition of log section header structures */
typedef struct sal_log_sec_header {
efi_guid_t guid; /* Unique Section ID */
sal_log_revision_t revision; /* Major and Minor revision of Section */
u16 reserved;
u32 len; /* Section length */
} sal_log_section_hdr_t;
typedef struct sal_log_mod_error_info {
struct {
u64 check_info : 1,
requestor_identifier : 1,
responder_identifier : 1,
target_identifier : 1,
precise_ip : 1,
reserved : 59;
} valid;
u64 check_info;
u64 requestor_identifier;
u64 responder_identifier;
u64 target_identifier;
u64 precise_ip;
} sal_log_mod_error_info_t;
typedef struct sal_processor_static_info {
struct {
u64 minstate : 1,
br : 1,
cr : 1,
ar : 1,
rr : 1,
fr : 1,
reserved : 58;
} valid;
pal_min_state_area_t min_state_area;
u64 br[8];
u64 cr[128];
u64 ar[128];
u64 rr[8];
struct ia64_fpreg __attribute__ ((packed)) fr[128];
} sal_processor_static_info_t;
struct sal_cpuid_info {
u64 regs[5];
u64 reserved;
};
typedef struct sal_log_processor_info {
sal_log_section_hdr_t header;
struct {
u64 proc_error_map : 1,
proc_state_param : 1,
proc_cr_lid : 1,
psi_static_struct : 1,
num_cache_check : 4,
num_tlb_check : 4,
num_bus_check : 4,
num_reg_file_check : 4,
num_ms_check : 4,
cpuid_info : 1,
reserved1 : 39;
} valid;
u64 proc_error_map;
u64 proc_state_parameter;
u64 proc_cr_lid;
/*
* The rest of this structure consists of variable-length arrays, which can't be
* expressed in C.
*/
sal_log_mod_error_info_t info[0];
/*
* This is what the rest looked like if C supported variable-length arrays:
*
* sal_log_mod_error_info_t cache_check_info[.valid.num_cache_check];
* sal_log_mod_error_info_t tlb_check_info[.valid.num_tlb_check];
* sal_log_mod_error_info_t bus_check_info[.valid.num_bus_check];
* sal_log_mod_error_info_t reg_file_check_info[.valid.num_reg_file_check];
* sal_log_mod_error_info_t ms_check_info[.valid.num_ms_check];
* struct sal_cpuid_info cpuid_info;
* sal_processor_static_info_t processor_static_info;
*/
} sal_log_processor_info_t;
/* Given a sal_log_processor_info_t pointer, return a pointer to the processor_static_info: */
#define SAL_LPI_PSI_INFO(l) \
({ sal_log_processor_info_t *_l = (l); \
((sal_processor_static_info_t *) \
((char *) _l->info + ((_l->valid.num_cache_check + _l->valid.num_tlb_check \
+ _l->valid.num_bus_check + _l->valid.num_reg_file_check \
+ _l->valid.num_ms_check) * sizeof(sal_log_mod_error_info_t) \
+ sizeof(struct sal_cpuid_info)))); \
})
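/*
 * Illustrative sketch, not part of the original header: one way a log
 * consumer might walk the variable-length cache-check entries and then
 * locate the static processor info via SAL_LPI_PSI_INFO().  The helper
 * name is hypothetical.
 */
static inline unsigned int
sal_lpi_count_valid_cache_checks (sal_log_processor_info_t *lpi)
{
	unsigned int i, nvalid = 0, n = lpi->valid.num_cache_check;

	for (i = 0; i < n; i++)
		if (lpi->info[i].valid.check_info)	/* cache_check_info[i] carries data */
			nvalid++;

	if (lpi->valid.psi_static_struct) {
		sal_processor_static_info_t *psi = SAL_LPI_PSI_INFO(lpi);
		(void) psi;	/* psi->min_state_area, psi->br[], ... are valid per psi->valid bits */
	}
	return nvalid;
}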
/* platform error log structures */
typedef struct sal_log_mem_dev_err_info {
sal_log_section_hdr_t header;
struct {
u64 error_status : 1,
physical_addr : 1,
addr_mask : 1,
node : 1,
card : 1,
module : 1,
bank : 1,
device : 1,
row : 1,
column : 1,
bit_position : 1,
requestor_id : 1,
responder_id : 1,
target_id : 1,
bus_spec_data : 1,
oem_id : 1,
oem_data : 1,
reserved : 47;
} valid;
u64 error_status;
u64 physical_addr;
u64 addr_mask;
u16 node;
u16 card;
u16 module;
u16 bank;
u16 device;
u16 row;
u16 column;
u16 bit_position;
u64 requestor_id;
u64 responder_id;
u64 target_id;
u64 bus_spec_data;
u8 oem_id[16];
u8 oem_data[1]; /* Variable length data */
} sal_log_mem_dev_err_info_t;
typedef struct sal_log_sel_dev_err_info {
sal_log_section_hdr_t header;
struct {
u64 record_id : 1,
record_type : 1,
generator_id : 1,
evm_rev : 1,
sensor_type : 1,
sensor_num : 1,
event_dir : 1,
event_data1 : 1,
event_data2 : 1,
event_data3 : 1,
reserved : 54;
} valid;
u16 record_id;
u8 record_type;
u8 timestamp[4];
u16 generator_id;
u8 evm_rev;
u8 sensor_type;
u8 sensor_num;
u8 event_dir;
u8 event_data1;
u8 event_data2;
u8 event_data3;
} sal_log_sel_dev_err_info_t;
typedef struct sal_log_pci_bus_err_info {
sal_log_section_hdr_t header;
struct {
u64 err_status : 1,
err_type : 1,
bus_id : 1,
bus_address : 1,
bus_data : 1,
bus_cmd : 1,
requestor_id : 1,
responder_id : 1,
target_id : 1,
oem_data : 1,
reserved : 54;
} valid;
u64 err_status;
u16 err_type;
u16 bus_id;
u32 reserved;
u64 bus_address;
u64 bus_data;
u64 bus_cmd;
u64 requestor_id;
u64 responder_id;
u64 target_id;
u8 oem_data[1]; /* Variable length data */
} sal_log_pci_bus_err_info_t;
typedef struct sal_log_smbios_dev_err_info {
sal_log_section_hdr_t header;
struct {
u64 event_type : 1,
length : 1,
time_stamp : 1,
data : 1,
reserved1 : 60;
} valid;
u8 event_type;
u8 length;
u8 time_stamp[6];
u8 data[1]; /* data of variable length, length == slsmb_length */
} sal_log_smbios_dev_err_info_t;
typedef struct sal_log_pci_comp_err_info {
sal_log_section_hdr_t header;
struct {
u64 err_status : 1,
comp_info : 1,
num_mem_regs : 1,
num_io_regs : 1,
reg_data_pairs : 1,
oem_data : 1,
reserved : 58;
} valid;
u64 err_status;
struct {
u16 vendor_id;
u16 device_id;
u8 class_code[3];
u8 func_num;
u8 dev_num;
u8 bus_num;
u8 seg_num;
u8 reserved[5];
} comp_info;
u32 num_mem_regs;
u32 num_io_regs;
u64 reg_data_pairs[1];
/*
 * The array of address/data register pairs is num_mem_regs + num_io_regs elements
 * long. Each array element consists of a u64 address followed by a u64 data
 * value. The oem_data array immediately follows the reg_data_pairs array.
*/
u8 oem_data[1]; /* Variable length data */
} sal_log_pci_comp_err_info_t;
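/*
 * Illustrative sketch, not part of the original header: walking the
 * address/data register pairs described above and locating the OEM data
 * that follows them.  The helper name is hypothetical.
 */
static inline const u8 *
sal_pci_comp_oem_data (const sal_log_pci_comp_err_info_t *ci)
{
	u64 i, npairs = (u64) ci->num_mem_regs + ci->num_io_regs;
	const u64 *pairs = ci->reg_data_pairs;

	for (i = 0; i < npairs; i++) {
		u64 addr = pairs[2*i];		/* register address */
		u64 data = pairs[2*i + 1];	/* register contents */
		(void) addr; (void) data;	/* a real consumer would decode these */
	}
	return (const u8 *) &pairs[2*npairs];	/* oem_data starts right after the pairs */
}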
typedef struct sal_log_plat_specific_err_info {
sal_log_section_hdr_t header;
struct {
u64 err_status : 1,
guid : 1,
oem_data : 1,
reserved : 61;
} valid;
u64 err_status;
efi_guid_t guid;
u8 oem_data[1]; /* platform specific variable length data */
} sal_log_plat_specific_err_info_t;
typedef struct sal_log_host_ctlr_err_info {
sal_log_section_hdr_t header;
struct {
u64 err_status : 1,
requestor_id : 1,
responder_id : 1,
target_id : 1,
bus_spec_data : 1,
oem_data : 1,
reserved : 58;
} valid;
u64 err_status;
u64 requestor_id;
u64 responder_id;
u64 target_id;
u64 bus_spec_data;
u8 oem_data[1]; /* Variable length OEM data */
} sal_log_host_ctlr_err_info_t;
typedef struct sal_log_plat_bus_err_info {
sal_log_section_hdr_t header;
struct {
u64 err_status : 1,
requestor_id : 1,
responder_id : 1,
target_id : 1,
bus_spec_data : 1,
oem_data : 1,
reserved : 58;
} valid;
u64 err_status;
u64 requestor_id;
u64 responder_id;
u64 target_id;
u64 bus_spec_data;
u8 oem_data[1]; /* Variable length OEM data */
} sal_log_plat_bus_err_info_t;
/* Overall platform error section structure */
typedef union sal_log_platform_err_info {
sal_log_mem_dev_err_info_t mem_dev_err;
sal_log_sel_dev_err_info_t sel_dev_err;
sal_log_pci_bus_err_info_t pci_bus_err;
sal_log_smbios_dev_err_info_t smbios_dev_err;
sal_log_pci_comp_err_info_t pci_comp_err;
sal_log_plat_specific_err_info_t plat_specific_err;
sal_log_host_ctlr_err_info_t host_ctlr_err;
sal_log_plat_bus_err_info_t plat_bus_err;
} sal_log_platform_err_info_t;
/* SAL log over-all, multi-section error record structure (processor+platform) */
typedef struct err_rec {
sal_log_record_header_t sal_elog_header;
sal_log_processor_info_t proc_err;
sal_log_platform_err_info_t plat_err;
u8 oem_data_pad[1024];
} ia64_err_rec_t;
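/*
 * Illustrative sketch, not part of the original header: counting the
 * sections of a raw SAL error record, assuming the layout described by the
 * SAL spec (a record header followed by back-to-back sections, each led by
 * a sal_log_section_hdr_t whose 'len' covers the whole section).  The
 * helper name is hypothetical.
 */
static inline int
sal_log_count_sections (const sal_log_record_header_t *rec)
{
	const u8 *p = (const u8 *) (rec + 1);
	const u8 *end = (const u8 *) rec + rec->len;
	int nsect = 0;

	while (p + sizeof(sal_log_section_hdr_t) <= end) {
		const sal_log_section_hdr_t *sh = (const sal_log_section_hdr_t *) p;

		if (sh->len < sizeof(*sh))
			break;		/* malformed section; stop scanning */
		nsect++;
		p += sh->len;
	}
	return nsect;
}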
/*
* Now define a couple of inline functions for improved type checking
* and convenience.
*/
static inline long
ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
unsigned long *drift_info)
{
struct ia64_sal_retval isrv;
SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
*ticks_per_second = isrv.v0;
*drift_info = isrv.v1;
return isrv.status;
}
/* Flush all the processor and platform level instruction and/or data caches */
static inline s64
ia64_sal_cache_flush (u64 cache_type)
{
struct ia64_sal_retval isrv;
SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
return isrv.status;
}
/* Initialize all the processor and platform level instruction and data caches */
static inline s64
ia64_sal_cache_init (void)
{
struct ia64_sal_retval isrv;
SAL_CALL(isrv, SAL_CACHE_INIT, 0, 0, 0, 0, 0, 0, 0);
return isrv.status;
}
/*
* Clear the processor and platform information logged by SAL with respect to the machine
* state at the time of MCA's, INITs, CMCs, or CPEs.
*/
static inline s64
ia64_sal_clear_state_info (u64 sal_info_type)
{
struct ia64_sal_retval isrv;
SAL_CALL_REENTRANT(isrv, SAL_CLEAR_STATE_INFO, sal_info_type, 0,
0, 0, 0, 0, 0);
return isrv.status;
}
/*
 * Get the processor and platform information logged by SAL with respect to the machine
 * state at the time of the MCAs, INITs, CMCs, or CPEs.
 */
static inline u64
ia64_sal_get_state_info (u64 sal_info_type, u64 *sal_info)
{
struct ia64_sal_retval isrv;
SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
sal_info, 0, 0, 0, 0);
if (isrv.status)
return 0;
return isrv.v0;
}
/*
* Get the maximum size of the information logged by SAL with respect to the machine state
* at the time of MCAs, INITs, CMCs, or CPEs.
*/
static inline u64
ia64_sal_get_state_info_size (u64 sal_info_type)
{
struct ia64_sal_retval isrv;
SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO_SIZE, sal_info_type, 0,
0, 0, 0, 0, 0);
if (isrv.status)
return 0;
return isrv.v0;
}
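/*
 * Illustrative sketch, not part of the original header: the usual calling
 * sequence is to query the maximum record size, fetch the current record,
 * and then clear it so SAL can log new events.  The helper name is
 * hypothetical; the buffer is assumed to be supplied by the caller (e.g.
 * allocated at boot from the maximum size reported by SAL).
 */
static inline u64
ia64_sal_fetch_state_info (u64 sal_info_type, u64 *buf, u64 buf_size)
{
	u64 size = ia64_sal_get_state_info_size(sal_info_type);

	if (!size || size > buf_size)
		return 0;				/* nothing logged, or buffer too small */
	size = ia64_sal_get_state_info(sal_info_type, buf);
	if (size)
		ia64_sal_clear_state_info(sal_info_type);	/* let SAL reuse its log space */
	return size;					/* bytes actually retrieved */
}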
/*
* Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup from
* the monarch processor. Must not lock, because it will not return on any cpu until the
* monarch processor sends a wake up.
*/
static inline s64
ia64_sal_mc_rendez (void)
{
struct ia64_sal_retval isrv;
SAL_CALL_NOLOCK(isrv, SAL_MC_RENDEZ, 0, 0, 0, 0, 0, 0, 0);
return isrv.status;
}
/*
 * Allow the OS to specify the interrupt number to be used by SAL to interrupt the OS during
* the machine check rendezvous sequence as well as the mechanism to wake up the
* non-monarch processor at the end of machine check processing.
* Returns the complete ia64_sal_retval because some calls return more than just a status
* value.
*/
static inline struct ia64_sal_retval
ia64_sal_mc_set_params (u64 param_type, u64 i_or_m, u64 i_or_m_val, u64 timeout, u64 rz_always)
{
struct ia64_sal_retval isrv;
SAL_CALL(isrv, SAL_MC_SET_PARAMS, param_type, i_or_m, i_or_m_val,
timeout, rz_always, 0, 0);
return isrv;
}
/* Read from PCI configuration space */
static inline s64
ia64_sal_pci_config_read (u64 pci_config_addr, int type, u64 size, u64 *value)
{
struct ia64_sal_retval isrv;
SAL_CALL(isrv, SAL_PCI_CONFIG_READ, pci_config_addr, size, type, 0, 0, 0, 0);
if (value)
*value = isrv.v0;
return isrv.status;
}
/* Write to PCI configuration space */
static inline s64
ia64_sal_pci_config_write (u64 pci_config_addr, int type, u64 size, u64 value)
{
struct ia64_sal_retval isrv;
SAL_CALL(isrv, SAL_PCI_CONFIG_WRITE, pci_config_addr, size, value,
type, 0, 0, 0);
return isrv.status;
}
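/*
 * Illustrative sketch, not part of the original header: a read-modify-write
 * of a 4-byte config register using the two SAL calls above.  The encoding
 * of pci_config_addr and the meaning of 'type' follow the SAL spec; the
 * helper name is hypothetical.
 */
static inline s64
ia64_sal_pci_config_set_bits (u64 pci_config_addr, int type, u64 set_bits)
{
	u64 val;
	s64 status;

	status = ia64_sal_pci_config_read(pci_config_addr, type, 4, &val);
	if (status)
		return status;
	return ia64_sal_pci_config_write(pci_config_addr, type, 4, val | set_bits);
}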
/*
* Register physical addresses of locations needed by SAL when SAL procedures are invoked
* in virtual mode.
*/
static inline s64
ia64_sal_register_physical_addr (u64 phys_entry, u64 phys_addr)
{
struct ia64_sal_retval isrv;
SAL_CALL(isrv, SAL_REGISTER_PHYSICAL_ADDR, phys_entry, phys_addr,
0, 0, 0, 0, 0);
return isrv.status;
}
/*
* Register software dependent code locations within SAL. These locations are handlers or
* entry points where SAL will pass control for the specified event. These event handlers
 * are for the boot rendezvous, MCAs and INIT scenarios.
*/
static inline s64
ia64_sal_set_vectors (u64 vector_type,
u64 handler_addr1, u64 gp1, u64 handler_len1,
u64 handler_addr2, u64 gp2, u64 handler_len2)
{
struct ia64_sal_retval isrv;
SAL_CALL(isrv, SAL_SET_VECTORS, vector_type,
handler_addr1, gp1, handler_len1,
handler_addr2, gp2, handler_len2);
return isrv.status;
}
/* Update the contents of PAL block in the non-volatile storage device */
static inline s64
ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size,
u64 *error_code, u64 *scratch_buf_size_needed)
{
struct ia64_sal_retval isrv;
SAL_CALL(isrv, SAL_UPDATE_PAL, param_buf, scratch_buf, scratch_buf_size,
0, 0, 0, 0);
if (error_code)
*error_code = isrv.v0;
if (scratch_buf_size_needed)
*scratch_buf_size_needed = isrv.v1;
return isrv.status;
}
extern unsigned long sal_platform_features;
extern int (*salinfo_platform_oemdata)(const u8 *, u8 **, u64 *);
struct sal_ret_values {
long r8; long r9; long r10; long r11;
};
#define IA64_SAL_OEMFUNC_MIN 0x02000000
#define IA64_SAL_OEMFUNC_MAX 0x03ffffff
extern int ia64_sal_oemcall(struct ia64_sal_retval *, u64, u64, u64, u64, u64,
u64, u64, u64);
extern int ia64_sal_oemcall_nolock(struct ia64_sal_retval *, u64, u64, u64,
u64, u64, u64, u64, u64);
extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
u64, u64, u64, u64, u64);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_IA64_SAL_H */

View File

@@ -0,0 +1,28 @@
#ifndef _ASM_IA64_SCATTERLIST_H
#define _ASM_IA64_SCATTERLIST_H
/*
* Modified 1998-1999, 2001-2002, 2004
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
struct scatterlist {
struct page *page;
unsigned int offset;
unsigned int length; /* buffer length */
dma_addr_t dma_address;
unsigned int dma_length;
};
/*
* It used to be that ISA_DMA_THRESHOLD had something to do with the
 * DMA limits of ISA devices. Nowadays, its only remaining use (apart
 * from the aha1542.c driver, which isn't 64-bit clean anyhow) is to
 * tell the block layer (via BLK_BOUNCE_ISA) the maximum physical
 * address of a page allocated with GFP_DMA. On IA-64,
* that's 4GB - 1.
*/
#define ISA_DMA_THRESHOLD 0xffffffff
#endif /* _ASM_IA64_SCATTERLIST_H */

View File

@@ -0,0 +1,23 @@
#ifndef _ASM_IA64_SECTIONS_H
#define _ASM_IA64_SECTIONS_H
/*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm-generic/sections.h>
extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
extern char __start_gate_section[];
extern char __start_gate_mckinley_e9_patchlist[], __end_gate_mckinley_e9_patchlist[];
extern char __start_gate_vtop_patchlist[], __end_gate_vtop_patchlist[];
extern char __start_gate_fsyscall_patchlist[], __end_gate_fsyscall_patchlist[];
extern char __start_gate_brl_fsys_bubble_down_patchlist[], __end_gate_brl_fsys_bubble_down_patchlist[];
extern char __start_unwind[], __end_unwind[];
extern char _end[]; /* end of kernel image */
#endif /* _ASM_IA64_SECTIONS_H */

View File

@@ -0,0 +1,6 @@
#ifndef _ASM_IA64_SEGMENT_H
#define _ASM_IA64_SEGMENT_H
/* Only here because we have some old header files that expect it.. */
#endif /* _ASM_IA64_SEGMENT_H */

View File

@@ -0,0 +1,102 @@
#ifndef _ASM_IA64_SEMAPHORE_H
#define _ASM_IA64_SEMAPHORE_H
/*
* Copyright (C) 1998-2000 Hewlett-Packard Co
* Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <asm/atomic.h>
struct semaphore {
atomic_t count;
int sleepers;
wait_queue_head_t wait;
};
#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
.count = ATOMIC_INIT(n), \
.sleepers = 0, \
.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
}
#define __MUTEX_INITIALIZER(name) __SEMAPHORE_INITIALIZER(name,1)
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)
static inline void
sema_init (struct semaphore *sem, int val)
{
*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
}
static inline void
init_MUTEX (struct semaphore *sem)
{
sema_init(sem, 1);
}
static inline void
init_MUTEX_LOCKED (struct semaphore *sem)
{
sema_init(sem, 0);
}
extern void __down (struct semaphore * sem);
extern int __down_interruptible (struct semaphore * sem);
extern int __down_trylock (struct semaphore * sem);
extern void __up (struct semaphore * sem);
/*
* Atomically decrement the semaphore's count. If it goes negative,
* block the calling thread in the TASK_UNINTERRUPTIBLE state.
*/
static inline void
down (struct semaphore *sem)
{
might_sleep();
if (atomic_dec_return(&sem->count) < 0)
__down(sem);
}
/*
* Atomically decrement the semaphore's count. If it goes negative,
* block the calling thread in the TASK_INTERRUPTIBLE state.
*/
static inline int
down_interruptible (struct semaphore * sem)
{
int ret = 0;
might_sleep();
if (atomic_dec_return(&sem->count) < 0)
ret = __down_interruptible(sem);
return ret;
}
static inline int
down_trylock (struct semaphore *sem)
{
int ret = 0;
if (atomic_dec_return(&sem->count) < 0)
ret = __down_trylock(sem);
return ret;
}
static inline void
up (struct semaphore * sem)
{
if (atomic_inc_return(&sem->count) <= 0)
__up(sem);
}
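/*
 * Illustrative sketch, not part of the original header: using the counting
 * semaphore as a simple mutex.  The names are hypothetical.
 */
static DECLARE_MUTEX(example_sem);

static inline int
example_critical_section (void)
{
	int ret = down_interruptible(&example_sem);

	if (ret)
		return ret;		/* interrupted by a signal while sleeping */
	/* ... access the state protected by example_sem ... */
	up(&example_sem);
	return 0;
}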
#endif /* _ASM_IA64_SEMAPHORE_H */

View File

@@ -0,0 +1,22 @@
#ifndef _ASM_IA64_SEMBUF_H
#define _ASM_IA64_SEMBUF_H
/*
* The semid64_ds structure for IA-64 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 2 miscellaneous 64-bit values
*/
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
__kernel_time_t sem_otime; /* last semop time */
__kernel_time_t sem_ctime; /* last change time */
unsigned long sem_nsems; /* no. of semaphores in array */
unsigned long __unused1;
unsigned long __unused2;
};
#endif /* _ASM_IA64_SEMBUF_H */

View File

@@ -0,0 +1,19 @@
/*
* include/asm-ia64/serial.h
*
* Derived from the i386 version.
*/
/*
* This assumes you have a 1.8432 MHz clock for your UART.
*
* It'd be nice if someone built a serial card with a 24.576 MHz
* clock, since the 16550A is capable of handling a top speed of 1.5
* megabits/second; but this requires the faster clock.
*/
#define BASE_BAUD ( 1843200 / 16 )
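/*
 * Worked example, not part of the original header: with the standard
 * 1.8432 MHz UART clock, BASE_BAUD = 1843200 / 16 = 115200, i.e. the
 * familiar top speed of a 16550A driven by that clock.
 */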
/*
* All legacy serial ports should be enumerated via ACPI namespace, so
* we need not list them here.
*/

View File

@@ -0,0 +1,6 @@
#ifndef __IA64_SETUP_H
#define __IA64_SETUP_H
#define COMMAND_LINE_SIZE 512
#endif

View File

@@ -0,0 +1,38 @@
#ifndef _ASM_IA64_SHMBUF_H
#define _ASM_IA64_SHMBUF_H
/*
* The shmid64_ds structure for IA-64 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 2 miscellaneous 64-bit values
*/
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
size_t shm_segsz; /* size of segment (bytes) */
__kernel_time_t shm_atime; /* last attach time */
__kernel_time_t shm_dtime; /* last detach time */
__kernel_time_t shm_ctime; /* last change time */
__kernel_pid_t shm_cpid; /* pid of creator */
__kernel_pid_t shm_lpid; /* pid of last operator */
unsigned long shm_nattch; /* no. of current attaches */
unsigned long __unused1;
unsigned long __unused2;
};
struct shminfo64 {
unsigned long shmmax;
unsigned long shmmin;
unsigned long shmmni;
unsigned long shmseg;
unsigned long shmall;
unsigned long __unused1;
unsigned long __unused2;
unsigned long __unused3;
unsigned long __unused4;
};
#endif /* _ASM_IA64_SHMBUF_H */

View File

@@ -0,0 +1,12 @@
#ifndef _ASM_IA64_SHMPARAM_H
#define _ASM_IA64_SHMPARAM_H
/*
* SHMLBA controls minimum alignment at which shared memory segments
* get attached. The IA-64 architecture says that there may be a
* performance degradation when there are virtual aliases within 1MB.
* To reduce the chance of this, we set SHMLBA to 1MB. --davidm 00/12/20
*/
#define SHMLBA (1024*1024)
#endif /* _ASM_IA64_SHMPARAM_H */

View File

@@ -0,0 +1,70 @@
#ifndef _ASM_IA64_SIGCONTEXT_H
#define _ASM_IA64_SIGCONTEXT_H
/*
* Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
* Copyright (C) 1998, 1999, 2001 David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm/fpu.h>
#define IA64_SC_FLAG_ONSTACK_BIT 0 /* is handler running on signal stack? */
#define IA64_SC_FLAG_IN_SYSCALL_BIT 1 /* did signal interrupt a syscall? */
#define IA64_SC_FLAG_FPH_VALID_BIT 2 /* is state in f[32]-f[127] valid? */
#define IA64_SC_FLAG_ONSTACK (1 << IA64_SC_FLAG_ONSTACK_BIT)
#define IA64_SC_FLAG_IN_SYSCALL (1 << IA64_SC_FLAG_IN_SYSCALL_BIT)
#define IA64_SC_FLAG_FPH_VALID (1 << IA64_SC_FLAG_FPH_VALID_BIT)
# ifndef __ASSEMBLY__
/*
* Note on handling of register backing store: sc_ar_bsp contains the address that would
 * be found in ar.bsp after executing a "cover" instruction in the context in which the
* signal was raised. If signal delivery required switching to an alternate signal stack
* (sc_rbs_base is not NULL), the "dirty" partition (as it would exist after executing the
* imaginary "cover" instruction) is backed by the *alternate* signal stack, not the
* original one. In this case, sc_rbs_base contains the base address of the new register
* backing store. The number of registers in the dirty partition can be calculated as:
*
* ndirty = ia64_rse_num_regs(sc_rbs_base, sc_rbs_base + (sc_loadrs >> 16))
*
*/
struct sigcontext {
unsigned long sc_flags; /* see manifest constants above */
unsigned long sc_nat; /* bit i == 1 iff scratch reg gr[i] is a NaT */
stack_t sc_stack; /* previously active stack */
unsigned long sc_ip; /* instruction pointer */
unsigned long sc_cfm; /* current frame marker */
unsigned long sc_um; /* user mask bits */
unsigned long sc_ar_rsc; /* register stack configuration register */
unsigned long sc_ar_bsp; /* backing store pointer */
unsigned long sc_ar_rnat; /* RSE NaT collection register */
unsigned long sc_ar_ccv; /* compare and exchange compare value register */
unsigned long sc_ar_unat; /* ar.unat of interrupted context */
unsigned long sc_ar_fpsr; /* floating-point status register */
unsigned long sc_ar_pfs; /* previous function state */
unsigned long sc_ar_lc; /* loop count register */
unsigned long sc_pr; /* predicate registers */
unsigned long sc_br[8]; /* branch registers */
/* Note: sc_gr[0] is used as the "uc_link" member of ucontext_t */
unsigned long sc_gr[32]; /* general registers (static partition) */
struct ia64_fpreg sc_fr[128]; /* floating-point registers */
unsigned long sc_rbs_base; /* NULL or new base of sighandler's rbs */
unsigned long sc_loadrs; /* see description above */
unsigned long sc_ar25; /* cmp8xchg16 uses this */
unsigned long sc_ar26; /* rsvd for scratch use */
unsigned long sc_rsvd[12]; /* reserved for future use */
/*
* The mask must come last so we can increase _NSIG_WORDS
* without breaking binary compatibility.
*/
sigset_t sc_mask; /* signal mask to restore after handler returns */
};
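/*
 * Illustrative sketch, not part of the original header: applying the ndirty
 * formula from the comment above.  It assumes ia64_rse_num_regs() from
 * <asm/rse.h> (which this header does not include); the helper name is
 * hypothetical and the result is meaningful when sc_rbs_base is non-zero.
 */
static inline unsigned long
sigcontext_ndirty (struct sigcontext *sc)
{
	unsigned long *rbs_base = (unsigned long *) sc->sc_rbs_base;
	unsigned long *bsp = (unsigned long *) (sc->sc_rbs_base + (sc->sc_loadrs >> 16));

	return ia64_rse_num_regs(rbs_base, bsp);	/* # of dirty stacked registers */
}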
# endif /* __ASSEMBLY__ */
#endif /* _ASM_IA64_SIGCONTEXT_H */

View File

@@ -0,0 +1,141 @@
#ifndef _ASM_IA64_SIGINFO_H
#define _ASM_IA64_SIGINFO_H
/*
* Based on <asm-i386/siginfo.h>.
*
* Modified 1998-2002
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
#define SI_PAD_SIZE ((SI_MAX_SIZE/sizeof(int)) - 4)
#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 4)
#define HAVE_ARCH_SIGINFO_T
#define HAVE_ARCH_COPY_SIGINFO
#define HAVE_ARCH_COPY_SIGINFO_TO_USER
#include <asm-generic/siginfo.h>
typedef struct siginfo {
int si_signo;
int si_errno;
int si_code;
int __pad0;
union {
int _pad[SI_PAD_SIZE];
/* kill() */
struct {
pid_t _pid; /* sender's pid */
uid_t _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
struct {
timer_t _tid; /* timer id */
int _overrun; /* overrun count */
char _pad[sizeof(__ARCH_SI_UID_T) - sizeof(int)];
sigval_t _sigval; /* must overlay ._rt._sigval! */
int _sys_private; /* not to be passed to user */
} _timer;
/* POSIX.1b signals */
struct {
pid_t _pid; /* sender's pid */
uid_t _uid; /* sender's uid */
sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
pid_t _pid; /* which child */
uid_t _uid; /* sender's uid */
int _status; /* exit code */
clock_t _utime;
clock_t _stime;
} _sigchld;
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
struct {
void __user *_addr; /* faulting insn/memory ref. */
int _imm; /* immediate value for "break" */
unsigned int _flags; /* see below */
unsigned long _isr; /* isr */
} _sigfault;
/* SIGPOLL */
struct {
long _band; /* POLL_IN, POLL_OUT, POLL_MSG (XPG requires a "long") */
int _fd;
} _sigpoll;
} _sifields;
} siginfo_t;
#define si_imm _sifields._sigfault._imm /* as per UNIX SysV ABI spec */
#define si_flags _sifields._sigfault._flags
/*
* si_isr is valid for SIGILL, SIGFPE, SIGSEGV, SIGBUS, and SIGTRAP provided that
* si_code is non-zero and __ISR_VALID is set in si_flags.
*/
#define si_isr _sifields._sigfault._isr
/*
* Flag values for si_flags:
*/
#define __ISR_VALID_BIT 0
#define __ISR_VALID (1 << __ISR_VALID_BIT)
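/*
 * Illustrative sketch, not part of the original header: checking whether
 * si_isr carries valid data, per the rule stated above.  The helper name
 * is hypothetical.
 */
static inline int
siginfo_isr_valid (const siginfo_t *si)
{
	return si->si_code != 0 && (si->si_flags & __ISR_VALID);
}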
/*
* SIGILL si_codes
*/
#define ILL_BADIADDR (__SI_FAULT|9) /* unimplemented instruction address */
#define __ILL_BREAK (__SI_FAULT|10) /* illegal break */
#define __ILL_BNDMOD (__SI_FAULT|11) /* bundle-update (modification) in progress */
#undef NSIGILL
#define NSIGILL 11
/*
* SIGFPE si_codes
*/
#define __FPE_DECOVF (__SI_FAULT|9) /* decimal overflow */
#define __FPE_DECDIV (__SI_FAULT|10) /* decimal division by zero */
#define __FPE_DECERR (__SI_FAULT|11) /* packed decimal error */
#define __FPE_INVASC (__SI_FAULT|12) /* invalid ASCII digit */
#define __FPE_INVDEC (__SI_FAULT|13) /* invalid decimal digit */
#undef NSIGFPE
#define NSIGFPE 13
/*
* SIGSEGV si_codes
*/
#define __SEGV_PSTKOVF (__SI_FAULT|3) /* paragraph stack overflow */
#undef NSIGSEGV
#define NSIGSEGV 3
/*
* SIGTRAP si_codes
*/
#define TRAP_BRANCH (__SI_FAULT|3) /* process taken branch trap */
#define TRAP_HWBKPT (__SI_FAULT|4) /* hardware breakpoint or watchpoint */
#undef NSIGTRAP
#define NSIGTRAP 4
#ifdef __KERNEL__
#include <linux/string.h>
static inline void
copy_siginfo (siginfo_t *to, siginfo_t *from)
{
if (from->si_code < 0)
memcpy(to, from, sizeof(siginfo_t));
else
/* _sigchld is currently the largest known union member */
memcpy(to, from, 4*sizeof(int) + sizeof(from->_sifields._sigchld));
}
#endif /* __KERNEL__ */
#endif /* _ASM_IA64_SIGINFO_H */

View File

@@ -0,0 +1,183 @@
#ifndef _ASM_IA64_SIGNAL_H
#define _ASM_IA64_SIGNAL_H
/*
* Modified 1998-2001, 2003
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*
* Unfortunately, this file is being included by bits/signal.h in
* glibc-2.x. Hence the #ifdef __KERNEL__ ugliness.
*/
#define SIGHUP 1
#define SIGINT 2
#define SIGQUIT 3
#define SIGILL 4
#define SIGTRAP 5
#define SIGABRT 6
#define SIGIOT 6
#define SIGBUS 7
#define SIGFPE 8
#define SIGKILL 9
#define SIGUSR1 10
#define SIGSEGV 11
#define SIGUSR2 12
#define SIGPIPE 13
#define SIGALRM 14
#define SIGTERM 15
#define SIGSTKFLT 16
#define SIGCHLD 17
#define SIGCONT 18
#define SIGSTOP 19
#define SIGTSTP 20
#define SIGTTIN 21
#define SIGTTOU 22
#define SIGURG 23
#define SIGXCPU 24
#define SIGXFSZ 25
#define SIGVTALRM 26
#define SIGPROF 27
#define SIGWINCH 28
#define SIGIO 29
#define SIGPOLL SIGIO
/*
#define SIGLOST 29
*/
#define SIGPWR 30
#define SIGSYS 31
/* signal 31 is no longer "unused", but the SIGUNUSED macro remains for backwards compatibility */
#define SIGUNUSED 31
/* These should not be considered constants from userland. */
#define SIGRTMIN 32
#define SIGRTMAX _NSIG
/*
* SA_FLAGS values:
*
* SA_ONSTACK indicates that a registered stack_t will be used.
* SA_INTERRUPT is a no-op, but left due to historical reasons.
* SA_RESTART flag to get restarting signals (which were the default long ago)
* SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
* SA_RESETHAND clears the handler when the signal is delivered.
* SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
* SA_NODEFER prevents the current signal from being masked in the handler.
*
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
* Unix names RESETHAND and NODEFER respectively.
*/
#define SA_NOCLDSTOP 0x00000001
#define SA_NOCLDWAIT 0x00000002
#define SA_SIGINFO 0x00000004
#define SA_ONSTACK 0x08000000
#define SA_RESTART 0x10000000
#define SA_NODEFER 0x40000000
#define SA_RESETHAND 0x80000000
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */
#define SA_RESTORER 0x04000000
/*
* sigaltstack controls
*/
#define SS_ONSTACK 1
#define SS_DISABLE 2
/*
* The minimum stack size needs to be fairly large because we want to
* be sure that an app compiled for today's CPUs will continue to run
* on all future CPU models. The CPU model matters because the signal
* frame needs to have space for the complete machine state, including
* all physical stacked registers. The number of physical stacked
* registers is CPU model dependent, but given that the width of
* ar.rsc.loadrs is 14 bits, we can assume that they'll never take up
* more than 16KB of space.
*/
#if 1
/*
* This is a stupid typo: the value was _meant_ to be 131072 (0x20000), but I typed it
* in wrong. ;-( To preserve backwards compatibility, we leave the kernel at the
* incorrect value and fix libc only.
*/
# define MINSIGSTKSZ 131027 /* min. stack size for sigaltstack() */
#else
# define MINSIGSTKSZ 131072 /* min. stack size for sigaltstack() */
#endif
#define SIGSTKSZ 262144 /* default stack size for sigaltstack() */
#ifdef __KERNEL__
#define _NSIG 64
#define _NSIG_BPW 64
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
/*
* These values of sa_flags are used only by the kernel as part of the
* irq handling routines.
*
* SA_INTERRUPT is also used by the irq handling routines.
* SA_SHIRQ is for shared interrupt support on PCI and EISA.
*/
#define SA_PROBE SA_ONESHOT
#define SA_SAMPLE_RANDOM SA_RESTART
#define SA_SHIRQ 0x04000000
#define SA_PERCPU_IRQ 0x02000000
#endif /* __KERNEL__ */
#define SIG_BLOCK 0 /* for blocking signals */
#define SIG_UNBLOCK 1 /* for unblocking signals */
#define SIG_SETMASK 2 /* for setting the signal mask */
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
# ifndef __ASSEMBLY__
# include <linux/types.h>
/* Avoid too many header ordering problems. */
struct siginfo;
/* Type of a signal handler. */
typedef void __user (*__sighandler_t)(int);
typedef struct sigaltstack {
void __user *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
#ifdef __KERNEL__
/* Most things should be clean enough to redefine this at will, if care
is taken to make libc match. */
typedef unsigned long old_sigset_t;
typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
sigset_t sa_mask; /* mask last for extensibility */
};
struct k_sigaction {
struct sigaction sa;
};
# include <asm/sigcontext.h>
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#endif /* __KERNEL__ */
# endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_SIGNAL_H */

Some files were not shown because too many files have changed in this diff