(2006-08-06) rescue-bootcd

Committed: 2006-08-06 00:00:00 +02:00
parent 2f796b816a
commit decb062d20
21091 changed files with 7076462 additions and 0 deletions


@@ -0,0 +1,81 @@
#ifndef _ASM_GENERIC_BITOPS_H_
#define _ASM_GENERIC_BITOPS_H_
/*
* For the benefit of those who are trying to port Linux to another
* architecture, here are some C-language equivalents. You should
* recode these in the native assembly language, if at all possible.
* To guarantee atomicity, these routines call cli() and sti() to
* disable interrupts while they operate. (You have to provide inline
* routines to cli() and sti().)
*
* Also note, these routines assume that you have 32 bit longs.
* You will have to change this if you are trying to port Linux to the
* Alpha architecture or to a Cray. :-)
*
* C language equivalents written by Theodore Ts'o, 9/26/92
*/
extern __inline__ int set_bit(int nr, long * addr)
{
int mask, retval;
addr += nr >> 5;
mask = 1 << (nr & 0x1f);
cli();
retval = (mask & *addr) != 0;
*addr |= mask;
sti();
return retval;
}
extern __inline__ int clear_bit(int nr, long * addr)
{
int mask, retval;
addr += nr >> 5;
mask = 1 << (nr & 0x1f);
cli();
retval = (mask & *addr) != 0;
*addr &= ~mask;
sti();
return retval;
}
extern __inline__ int test_bit(int nr, const unsigned long * addr)
{
int mask;
addr += nr >> 5;
mask = 1 << (nr & 0x1f);
return ((mask & *addr) != 0);
}
/*
* fls: find last bit set.
*/
#define fls(x) generic_fls(x)
#ifdef __KERNEL__
/*
* ffs: find first bit set. This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
#define ffs(x) generic_ffs(x)
/*
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of a N-bit word
*/
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
#endif /* __KERNEL__ */
#endif /* _ASM_GENERIC_BITOPS_H_ */
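A minimal usage sketch of these generic bitops, assuming an architecture that supplies cli()/sti(); the bitmap and the callers are illustrative, not part of the header:

/* Hypothetical 64-bit event bitmap (two 32-bit longs). */
static long pending[2];

static void mark_event(int nr)
{
	if (!set_bit(nr, pending))	/* returns the previous bit value */
		printk("event %d newly set\n", nr);
}

static int event_pending(int nr)
{
	return test_bit(nr, (const unsigned long *)pending);
}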


@@ -0,0 +1,34 @@
#ifndef _ASM_GENERIC_BUG_H
#define _ASM_GENERIC_BUG_H
#include <linux/compiler.h>
#include <linux/config.h>
#ifndef HAVE_ARCH_BUG
#define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
panic("BUG!"); \
} while (0)
#endif
#ifndef HAVE_ARCH_PAGE_BUG
#define PAGE_BUG(page) do { \
printk("page BUG for page at %p\n", page); \
BUG(); \
} while (0)
#endif
#ifndef HAVE_ARCH_BUG_ON
#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
#endif
#ifndef HAVE_ARCH_WARN_ON
#define WARN_ON(condition) do { \
if (unlikely((condition)!=0)) { \
printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
dump_stack(); \
} \
} while (0)
#endif
#endif
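Typical call sites, sketched for illustration (the struct and its field are invented): BUG() is for violated invariants, WARN_ON() for suspicious but survivable states.

struct req { int queued; };	/* hypothetical */

static void enqueue(struct req *rq)
{
	BUG_ON(rq == NULL);	/* fatal: printk + panic */
	WARN_ON(rq->queued);	/* non-fatal: printk + dump_stack */
	rq->queued = 1;
}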


@@ -0,0 +1,58 @@
#ifndef _ASM_GENERIC_DIV64_H
#define _ASM_GENERIC_DIV64_H
/*
* Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
* Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
*
* The semantics of do_div() are:
*
* uint32_t do_div(uint64_t *n, uint32_t base)
* {
* uint32_t remainder = *n % base;
* *n = *n / base;
* return remainder;
* }
*
* NOTE: macro parameter n is evaluated multiple times,
* beware of side effects!
*/
#include <linux/types.h>
#include <linux/compiler.h>
#if BITS_PER_LONG == 64
# define do_div(n,base) ({ \
uint32_t __base = (base); \
uint32_t __rem; \
__rem = ((uint64_t)(n)) % __base; \
(n) = ((uint64_t)(n)) / __base; \
__rem; \
})
#elif BITS_PER_LONG == 32
extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
/* The unnecessary pointer compare is there
* to check for type safety (n must be 64bit)
*/
# define do_div(n,base) ({ \
uint32_t __base = (base); \
uint32_t __rem; \
(void)(((typeof((n)) *)0) == ((uint64_t *)0)); \
if (likely(((n) >> 32) == 0)) { \
__rem = (uint32_t)(n) % __base; \
(n) = (uint32_t)(n) / __base; \
} else \
__rem = __div64_32(&(n), __base); \
__rem; \
})
#else /* BITS_PER_LONG == ?? */
# error do_div() does not yet support the C64
#endif /* BITS_PER_LONG */
#endif /* _ASM_GENERIC_DIV64_H */
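Since do_div() divides n in place and returns the remainder, the canonical use is repeated divide-and-collect. A sketch, with the helper name invented:

/* Render a u64 in decimal without relying on 64-bit libgcc division. */
static int u64_to_dec(uint64_t n, char *buf)
{
	char tmp[21];	/* 2^64 has at most 20 decimal digits */
	int i = 0, len = 0;

	do {
		tmp[i++] = '0' + do_div(n, 10);	/* n /= 10; returns n % 10 */
	} while (n);
	while (i)
		buf[len++] = tmp[--i];	/* digits come out least significant first */
	buf[len] = '\0';
	return len;
}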


@@ -0,0 +1,22 @@
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H
/* This is used for archs that do not support DMA */
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
int flag)
{
BUG();
return NULL;
}
static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)
{
BUG();
}
#endif /* _ASM_GENERIC_DMA_MAPPING_H */


@@ -0,0 +1,309 @@
/* Copyright (C) 2002 by James.Bottomley@HansenPartnership.com
*
* Implements the generic device dma API via the existing pci_ one
* for unconverted architectures
*/
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H
#include <linux/config.h>
#ifdef CONFIG_PCI
/* we implement the API below in terms of the existing PCI one,
* so include it */
#include <linux/pci.h>
/* need struct page definitions */
#include <linux/mm.h>
static inline int
dma_supported(struct device *dev, u64 mask)
{
BUG_ON(dev->bus != &pci_bus_type);
return pci_dma_supported(to_pci_dev(dev), mask);
}
static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
BUG_ON(dev->bus != &pci_bus_type);
return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
}
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
int flag)
{
BUG_ON(dev->bus != &pci_bus_type);
return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
}
static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)
{
BUG_ON(dev->bus != &pci_bus_type);
pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
}
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
}
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
}
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
}
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
}
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
size, (int)direction);
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
size, (int)direction);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
}
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
return pci_dma_mapping_error(dma_addr);
}
#else
static inline int
dma_supported(struct device *dev, u64 mask)
{
return 0;
}
static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
BUG();
return 0;
}
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
int flag)
{
BUG();
return NULL;
}
static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)
{
BUG();
}
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
BUG();
return 0;
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
{
BUG();
}
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
BUG();
return 0;
}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
BUG();
}
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
BUG();
return 0;
}
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
BUG();
}
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG();
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG();
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
BUG();
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
BUG();
}
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
return 0;
}
#endif
/* Now for the API extensions over the pci_ one */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d) (1)
static inline int
dma_get_cache_alignment(void)
{
/* no easy way to get cache size on all processors, so return
* the maximum possible, to be safe */
return (1 << L1_CACHE_SHIFT_MAX);
}
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
/* just sync everything, that's all the pci API can do */
dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
}
static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
/* just sync everything, that's all the pci API can do */
dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
}
static inline void
dma_cache_sync(void *vaddr, size_t size,
enum dma_data_direction direction)
{
/* could define this in terms of the dma_cache ... operations,
* but if you get this on a platform, you should convert the platform
* to using the generic device DMA API */
BUG();
}
#endif
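For reference, the streaming-DMA pattern these wrappers serve looks roughly like this; the device, buffer, and length are placeholders:

static int start_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(handle))
		return -ENOMEM;
	/* ... program the hardware with 'handle', wait for the IRQ ... */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}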


@@ -0,0 +1,39 @@
#ifndef _ASM_GENERIC_ERRNO_BASE_H
#define _ASM_GENERIC_ERRNO_BASE_H
#define EPERM 1 /* Operation not permitted */
#define ENOENT 2 /* No such file or directory */
#define ESRCH 3 /* No such process */
#define EINTR 4 /* Interrupted system call */
#define EIO 5 /* I/O error */
#define ENXIO 6 /* No such device or address */
#define E2BIG 7 /* Argument list too long */
#define ENOEXEC 8 /* Exec format error */
#define EBADF 9 /* Bad file number */
#define ECHILD 10 /* No child processes */
#define EAGAIN 11 /* Try again */
#define ENOMEM 12 /* Out of memory */
#define EACCES 13 /* Permission denied */
#define EFAULT 14 /* Bad address */
#define ENOTBLK 15 /* Block device required */
#define EBUSY 16 /* Device or resource busy */
#define EEXIST 17 /* File exists */
#define EXDEV 18 /* Cross-device link */
#define ENODEV 19 /* No such device */
#define ENOTDIR 20 /* Not a directory */
#define EISDIR 21 /* Is a directory */
#define EINVAL 22 /* Invalid argument */
#define ENFILE 23 /* File table overflow */
#define EMFILE 24 /* Too many open files */
#define ENOTTY 25 /* Not a typewriter */
#define ETXTBSY 26 /* Text file busy */
#define EFBIG 27 /* File too large */
#define ENOSPC 28 /* No space left on device */
#define ESPIPE 29 /* Illegal seek */
#define EROFS 30 /* Read-only file system */
#define EMLINK 31 /* Too many links */
#define EPIPE 32 /* Broken pipe */
#define EDOM 33 /* Math argument out of domain of func */
#define ERANGE 34 /* Math result not representable */
#endif


@@ -0,0 +1,105 @@
#ifndef _ASM_GENERIC_ERRNO_H
#define _ASM_GENERIC_ERRNO_H
#include <asm-generic/errno-base.h>
#define EDEADLK 35 /* Resource deadlock would occur */
#define ENAMETOOLONG 36 /* File name too long */
#define ENOLCK 37 /* No record locks available */
#define ENOSYS 38 /* Function not implemented */
#define ENOTEMPTY 39 /* Directory not empty */
#define ELOOP 40 /* Too many symbolic links encountered */
#define EWOULDBLOCK EAGAIN /* Operation would block */
#define ENOMSG 42 /* No message of desired type */
#define EIDRM 43 /* Identifier removed */
#define ECHRNG 44 /* Channel number out of range */
#define EL2NSYNC 45 /* Level 2 not synchronized */
#define EL3HLT 46 /* Level 3 halted */
#define EL3RST 47 /* Level 3 reset */
#define ELNRNG 48 /* Link number out of range */
#define EUNATCH 49 /* Protocol driver not attached */
#define ENOCSI 50 /* No CSI structure available */
#define EL2HLT 51 /* Level 2 halted */
#define EBADE 52 /* Invalid exchange */
#define EBADR 53 /* Invalid request descriptor */
#define EXFULL 54 /* Exchange full */
#define ENOANO 55 /* No anode */
#define EBADRQC 56 /* Invalid request code */
#define EBADSLT 57 /* Invalid slot */
#define EDEADLOCK EDEADLK
#define EBFONT 59 /* Bad font file format */
#define ENOSTR 60 /* Device not a stream */
#define ENODATA 61 /* No data available */
#define ETIME 62 /* Timer expired */
#define ENOSR 63 /* Out of streams resources */
#define ENONET 64 /* Machine is not on the network */
#define ENOPKG 65 /* Package not installed */
#define EREMOTE 66 /* Object is remote */
#define ENOLINK 67 /* Link has been severed */
#define EADV 68 /* Advertise error */
#define ESRMNT 69 /* Srmount error */
#define ECOMM 70 /* Communication error on send */
#define EPROTO 71 /* Protocol error */
#define EMULTIHOP 72 /* Multihop attempted */
#define EDOTDOT 73 /* RFS specific error */
#define EBADMSG 74 /* Not a data message */
#define EOVERFLOW 75 /* Value too large for defined data type */
#define ENOTUNIQ 76 /* Name not unique on network */
#define EBADFD 77 /* File descriptor in bad state */
#define EREMCHG 78 /* Remote address changed */
#define ELIBACC 79 /* Can not access a needed shared library */
#define ELIBBAD 80 /* Accessing a corrupted shared library */
#define ELIBSCN 81 /* .lib section in a.out corrupted */
#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
#define ELIBEXEC 83 /* Cannot exec a shared library directly */
#define EILSEQ 84 /* Illegal byte sequence */
#define ERESTART 85 /* Interrupted system call should be restarted */
#define ESTRPIPE 86 /* Streams pipe error */
#define EUSERS 87 /* Too many users */
#define ENOTSOCK 88 /* Socket operation on non-socket */
#define EDESTADDRREQ 89 /* Destination address required */
#define EMSGSIZE 90 /* Message too long */
#define EPROTOTYPE 91 /* Protocol wrong type for socket */
#define ENOPROTOOPT 92 /* Protocol not available */
#define EPROTONOSUPPORT 93 /* Protocol not supported */
#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
#define EPFNOSUPPORT 96 /* Protocol family not supported */
#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
#define EADDRINUSE 98 /* Address already in use */
#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
#define ENETDOWN 100 /* Network is down */
#define ENETUNREACH 101 /* Network is unreachable */
#define ENETRESET 102 /* Network dropped connection because of reset */
#define ECONNABORTED 103 /* Software caused connection abort */
#define ECONNRESET 104 /* Connection reset by peer */
#define ENOBUFS 105 /* No buffer space available */
#define EISCONN 106 /* Transport endpoint is already connected */
#define ENOTCONN 107 /* Transport endpoint is not connected */
#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
#define ETOOMANYREFS 109 /* Too many references: cannot splice */
#define ETIMEDOUT 110 /* Connection timed out */
#define ECONNREFUSED 111 /* Connection refused */
#define EHOSTDOWN 112 /* Host is down */
#define EHOSTUNREACH 113 /* No route to host */
#define EALREADY 114 /* Operation already in progress */
#define EINPROGRESS 115 /* Operation now in progress */
#define ESTALE 116 /* Stale NFS file handle */
#define EUCLEAN 117 /* Structure needs cleaning */
#define ENOTNAM 118 /* Not a XENIX named type file */
#define ENAVAIL 119 /* No XENIX semaphores available */
#define EISNAM 120 /* Is a named type file */
#define EREMOTEIO 121 /* Remote I/O error */
#define EDQUOT 122 /* Quota exceeded */
#define ENOMEDIUM 123 /* No medium found */
#define EMEDIUMTYPE 124 /* Wrong medium type */
#define ECANCELED 125 /* Operation Canceled */
#define ENOKEY 126 /* Required key not available */
#define EKEYEXPIRED 127 /* Key has expired */
#define EKEYREVOKED 128 /* Key has been revoked */
#define EKEYREJECTED 129 /* Key was rejected by service */
#endif


@@ -0,0 +1,8 @@
#warning <asm/hdreg.h> is obsolete, please do not use it
#ifndef __ASM_GENERIC_HDREG_H
#define __ASM_GENERIC_HDREG_H
typedef unsigned long ide_ioreg_t;
#endif /* __ASM_GENERIC_HDREG_H */


@@ -0,0 +1,38 @@
/* Generic I/O and MEMIO string operations. */
#define __ide_insw insw
#define __ide_insl insl
#define __ide_outsw outsw
#define __ide_outsl outsl
static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
{
while (count--) {
*(u16 *)addr = readw(port);
addr += 2;
}
}
static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
{
while (count--) {
*(u32 *)addr = readl(port);
addr += 4;
}
}
static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
{
while (count--) {
writew(*(u16 *)addr, port);
addr += 2;
}
}
static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
{
while (count--) {
writel(*(u32 *)addr, port);
addr += 4;
}
}


@@ -0,0 +1,63 @@
#ifndef __GENERIC_IO_H
#define __GENERIC_IO_H
#include <linux/linkage.h>
/*
* These are the "generic" interfaces for doing new-style
* memory-mapped or PIO accesses. Architectures may do
* their own arch-optimized versions, these just act as
* wrappers around the old-style IO register access functions:
* read[bwl]/write[bwl]/in[bwl]/out[bwl]
*
* Don't include this directly, include it from <asm/io.h>.
*/
/*
* Read/write from/to an (offsettable) iomem cookie. It might be a PIO
* access or a MMIO access, these functions don't care. The info is
* encoded in the hardware mapping set up by the mapping functions
* (or the cookie itself, depending on implementation and hw).
*
* The generic routines just encode the PIO/MMIO as part of the
* cookie, and coldly assume that the MMIO IO mappings are not
* in the low address range. Architectures for which this is not
* true can't use this generic implementation.
*/
extern unsigned int fastcall ioread8(void __iomem *);
extern unsigned int fastcall ioread16(void __iomem *);
extern unsigned int fastcall ioread32(void __iomem *);
extern void fastcall iowrite8(u8, void __iomem *);
extern void fastcall iowrite16(u16, void __iomem *);
extern void fastcall iowrite32(u32, void __iomem *);
/*
* "string" versions of the above. Note that they
* use native byte ordering for the accesses (on
* the assumption that IO and memory agree on a
* byte order, and CPU byteorder is irrelevant).
*
* They do _not_ update the port address. If you
* want MMIO that copies stuff laid out in MMIO
* memory across multiple ports, use "memcpy_toio()"
* and friends.
*/
extern void fastcall ioread8_rep(void __iomem *port, void *buf, unsigned long count);
extern void fastcall ioread16_rep(void __iomem *port, void *buf, unsigned long count);
extern void fastcall ioread32_rep(void __iomem *port, void *buf, unsigned long count);
extern void fastcall iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
extern void fastcall iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
extern void fastcall iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
/* Create a virtual mapping cookie for an IO port range */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
#endif
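A sketch of the intended calling sequence for a PCI driver; the BAR number and register offsets are made up:

static int probe_one(struct pci_dev *pdev)
{
	void __iomem *regs = pci_iomap(pdev, 0, 0);	/* BAR 0, full length */
	u32 id;

	if (!regs)
		return -ENOMEM;
	id = ioread32(regs);		/* same call whether PIO or MMIO */
	iowrite32(1, regs + 4);
	pci_iounmap(pdev, regs);
	return id ? 0 : -ENODEV;
}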


@@ -0,0 +1,118 @@
#ifndef _ASM_GENERIC_LOCAL_H
#define _ASM_GENERIC_LOCAL_H
#include <linux/config.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/types.h>
/* An unsigned long type for operations which are atomic for a single
* CPU. Usually used in combination with per-cpu variables. */
#if BITS_PER_LONG == 32
/* Implement in terms of atomics. */
/* Don't use typedef: don't want them to be mixed with atomic_t's. */
typedef struct
{
atomic_t a;
} local_t;
#define LOCAL_INIT(i) { ATOMIC_INIT(i) }
#define local_read(l) ((unsigned long)atomic_read(&(l)->a))
#define local_set(l,i) atomic_set((&(l)->a),(i))
#define local_inc(l) atomic_inc(&(l)->a)
#define local_dec(l) atomic_dec(&(l)->a)
#define local_add(i,l) atomic_add((i),(&(l)->a))
#define local_sub(i,l) atomic_sub((i),(&(l)->a))
/* Non-atomic variants, ie. preemption disabled and won't be touched
* in interrupt, etc. Some archs can optimize this case well. */
#define __local_inc(l) local_set((l), local_read(l) + 1)
#define __local_dec(l) local_set((l), local_read(l) - 1)
#define __local_add(i,l) local_set((l), local_read(l) + (i))
#define __local_sub(i,l) local_set((l), local_read(l) - (i))
#else /* ... can't use atomics. */
/* Implement in terms of three variables.
Another option would be to use local_irq_save/restore. */
typedef struct
{
/* 0 = in hardirq, 1 = in softirq, 2 = usermode. */
unsigned long v[3];
} local_t;
#define _LOCAL_VAR(l) ((l)->v[!in_interrupt() + !in_irq()])
#define LOCAL_INIT(i) { { (i), 0, 0 } }
static inline unsigned long local_read(local_t *l)
{
return l->v[0] + l->v[1] + l->v[2];
}
static inline void local_set(local_t *l, unsigned long v)
{
l->v[0] = v;
l->v[1] = l->v[2] = 0;
}
static inline void local_inc(local_t *l)
{
preempt_disable();
_LOCAL_VAR(l)++;
preempt_enable();
}
static inline void local_dec(local_t *l)
{
preempt_disable();
_LOCAL_VAR(l)--;
preempt_enable();
}
static inline void local_add(unsigned long v, local_t *l)
{
preempt_disable();
_LOCAL_VAR(l) += v;
preempt_enable();
}
static inline void local_sub(unsigned long v, local_t *l)
{
preempt_disable();
_LOCAL_VAR(l) -= v;
preempt_enable();
}
/* Non-atomic variants, ie. preemption disabled and won't be touched
* in interrupt, etc. Some archs can optimize this case well. */
#define __local_inc(l) ((l)->v[0]++)
#define __local_dec(l) ((l)->v[0]--)
#define __local_add(i,l) ((l)->v[0] += (i))
#define __local_sub(i,l) ((l)->v[0] -= (i))
#endif /* Non-atomic implementation */
/* Use these for per-cpu local_t variables: on some archs they are
* much more efficient than these naive implementations. Note they take
* a variable (eg. mystruct.foo), not an address.
*/
#define cpu_local_read(v) local_read(&__get_cpu_var(v))
#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
/* Non-atomic increments, ie. preemption disabled and won't be touched
* in interrupt, etc. Some archs can optimize this case well.
*/
#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v))
#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v))
#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v))
#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v))
#endif /* _ASM_GENERIC_LOCAL_H */
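Intended use with a per-cpu counter, sketched with an invented variable name:

static DEFINE_PER_CPU(local_t, nr_events) = LOCAL_INIT(0);

static void count_event(void)
{
	cpu_local_inc(nr_events);	/* atomic with respect to this CPU only */
}

static unsigned long events_on(int cpu)
{
	return local_read(&per_cpu(nr_events, cpu));
}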


@@ -0,0 +1,107 @@
/* include this file if the platform implements the dma_ DMA Mapping API
* and wants to provide the pci_ DMA Mapping API in terms of it */
#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
#define _ASM_GENERIC_PCI_DMA_COMPAT_H
#include <linux/dma-mapping.h>
/* note pci_set_dma_mask isn't here, since it's a public function
* exported from drivers/pci, use dma_supported instead */
static inline int
pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
return dma_supported(hwdev == NULL ? NULL : &hwdev->dev, mask);
}
static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
}
static inline void
pci_free_consistent(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
}
static inline dma_addr_t
pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
}
static inline void
pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
size_t size, int direction)
{
dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
}
static inline dma_addr_t
pci_map_page(struct pci_dev *hwdev, struct page *page,
unsigned long offset, size_t size, int direction)
{
return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
}
static inline void
pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
size_t size, int direction)
{
dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
}
static inline int
pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
{
return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
}
static inline void
pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
{
dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
}
static inline void
pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
}
static inline void
pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
}
static inline void
pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
}
static inline void
pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
}
static inline int
pci_dma_mapping_error(dma_addr_t dma_addr)
{
return dma_mapping_error(dma_addr);
}
#endif


@@ -0,0 +1,34 @@
/*
* linux/include/asm-generic/pci.h
*
* Copyright (C) 2003 Russell King
*/
#ifndef _ASM_GENERIC_PCI_H
#define _ASM_GENERIC_PCI_H
/**
* pcibios_resource_to_bus - convert resource to PCI bus address
* @dev: device which owns this resource
* @region: converted bus-centric region (start,end)
* @res: resource to convert
*
* Convert a resource to a PCI device bus address or bus window.
*/
static inline void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
struct resource *res)
{
region->start = res->start;
region->end = res->end;
}
#define pcibios_scan_all_fns(a, b) 0
#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return channel ? 15 : 14;
}
#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */
#endif


@@ -0,0 +1,42 @@
#ifndef _ASM_GENERIC_PERCPU_H_
#define _ASM_GENERIC_PERCPU_H_
#include <linux/compiler.h>
#define __GENERIC_PER_CPU
#ifdef CONFIG_SMP
extern unsigned long __per_cpu_offset[NR_CPUS];
/* Separate out the type, so (int[3], foo) works. */
#define DEFINE_PER_CPU(type, name) \
__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
/* var is in discarded region: offset to particular copy we want */
#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
#define __get_cpu_var(var) per_cpu(var, smp_processor_id())
/* A macro to avoid #include hell... */
#define percpu_modcopy(pcpudst, src, size) \
do { \
unsigned int __i; \
for (__i = 0; __i < NR_CPUS; __i++) \
if (cpu_possible(__i)) \
memcpy((pcpudst)+__per_cpu_offset[__i], \
(src), (size)); \
} while (0)
#else /* ! SMP */
#define DEFINE_PER_CPU(type, name) \
__typeof__(type) per_cpu__##name
#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var))
#define __get_cpu_var(var) per_cpu__##var
#endif /* SMP */
#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
#endif /* _ASM_GENERIC_PERCPU_H_ */
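A usage sketch; the counter name is invented. Note the summation loop mirrors percpu_modcopy() above in skipping impossible CPUs:

static DEFINE_PER_CPU(unsigned long, nr_pkts);

static void on_packet(void)
{
	__get_cpu_var(nr_pkts)++;	/* caller must hold off preemption */
}

static unsigned long total_pkts(void)
{
	unsigned long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_possible(cpu))
			sum += per_cpu(nr_pkts, cpu);
	return sum;
}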


@@ -0,0 +1,137 @@
#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H
#ifndef __HAVE_ARCH_PTEP_ESTABLISH
/*
* Establish a new mapping:
* - flush the old one
* - update the page tables
* - inform the TLB about the new one
*
* We hold the mm semaphore for reading and vma->vm_mm->page_table_lock.
*
* Note: the old pte is known to not be writable, so we don't need to
* worry about dirty bits etc getting lost.
*/
#ifndef __HAVE_ARCH_SET_PTE_ATOMIC
#define ptep_establish(__vma, __address, __ptep, __entry) \
do { \
set_pte(__ptep, __entry); \
flush_tlb_page(__vma, __address); \
} while (0)
#else /* __HAVE_ARCH_SET_PTE_ATOMIC */
#define ptep_establish(__vma, __address, __ptep, __entry) \
do { \
set_pte_atomic(__ptep, __entry); \
flush_tlb_page(__vma, __address); \
} while (0)
#endif /* __HAVE_ARCH_SET_PTE_ATOMIC */
#endif
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
* Largely same as above, but only sets the access flags (dirty,
* accessed, and writable). Furthermore, we know it always gets set
* to a "more permissive" setting, which allows most architectures
* to optimize this.
*/
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
do { \
set_pte(__ptep, __entry); \
flush_tlb_page(__vma, __address); \
} while (0)
#endif
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(pte_t *ptep)
{
pte_t pte = *ptep;
if (!pte_young(pte))
return 0;
set_pte(ptep, pte_mkold(pte));
return 1;
}
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep) \
({ \
int __young = ptep_test_and_clear_young(__ptep); \
if (__young) \
flush_tlb_page(__vma, __address); \
__young; \
})
#endif
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
pte_t pte = *ptep;
if (!pte_dirty(pte))
return 0;
set_pte(ptep, pte_mkclean(pte));
return 1;
}
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
({ \
int __dirty = ptep_test_and_clear_dirty(__ptep); \
if (__dirty) \
flush_tlb_page(__vma, __address); \
__dirty; \
})
#endif
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
pte_t pte = *ptep;
pte_clear(ptep);
return pte;
}
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define ptep_clear_flush(__vma, __address, __ptep) \
({ \
pte_t __pte = ptep_get_and_clear(__ptep); \
flush_tlb_page(__vma, __address); \
__pte; \
})
#endif
#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(pte_t *ptep)
{
pte_t old_pte = *ptep;
set_pte(ptep, pte_wrprotect(old_pte));
}
#endif
#ifndef __HAVE_ARCH_PTEP_MKDIRTY
static inline void ptep_mkdirty(pte_t *ptep)
{
pte_t old_pte = *ptep;
set_pte(ptep, pte_mkdirty(old_pte));
}
#endif
#ifndef __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (pte_val(A) == pte_val(B))
#endif
#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
#define page_test_and_clear_dirty(page) (0)
#endif
#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page) (0)
#endif
#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
#endif
#endif /* _ASM_GENERIC_PGTABLE_H */


@@ -0,0 +1,213 @@
/*
* include/asm-generic/rtc.h
*
* Author: Tom Rini <trini@mvista.com>
*
* Based on:
* drivers/char/rtc.c
*
* Please read the COPYING file for all license details.
*/
#ifndef __ASM_RTC_H__
#define __ASM_RTC_H__
#ifdef __KERNEL__
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#define RTC_PIE 0x40 /* periodic interrupt enable */
#define RTC_AIE 0x20 /* alarm interrupt enable */
#define RTC_UIE 0x10 /* update-finished interrupt enable */
/* some dummy definitions */
#define RTC_BATT_BAD 0x100 /* battery bad */
#define RTC_SQWE 0x08 /* enable square-wave output */
#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
#define RTC_DST_EN 0x01 /* auto switch DST - works for USA only */
/*
* Returns true if a clock update is in progress
*/
static inline unsigned char rtc_is_updating(void)
{
unsigned char uip;
spin_lock_irq(&rtc_lock);
uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
spin_unlock_irq(&rtc_lock);
return uip;
}
static inline unsigned int get_rtc_time(struct rtc_time *time)
{
unsigned long uip_watchdog = jiffies;
unsigned char ctrl;
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_year;
#endif
/*
* Read the RTC once any update in progress is done. The update
* can take just over 2ms. We wait 10 to 20ms. There is no need
* to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
* If you need to know *exactly* when a second has started, enable
* periodic update complete interrupts, (via ioctl) and then
* immediately read /dev/rtc which will block until you get the IRQ.
* Once the read clears, read the RTC time (again via ioctl). Easy.
*/
if (rtc_is_updating() != 0)
while (jiffies - uip_watchdog < 2*HZ/100) {
barrier();
cpu_relax();
}
/*
* Only the values that we read from the RTC are set. We leave
* tm_wday, tm_yday and tm_isdst untouched. Even though the
* RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
* by the RTC when initially set to a non-zero value.
*/
spin_lock_irq(&rtc_lock);
time->tm_sec = CMOS_READ(RTC_SECONDS);
time->tm_min = CMOS_READ(RTC_MINUTES);
time->tm_hour = CMOS_READ(RTC_HOURS);
time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
time->tm_mon = CMOS_READ(RTC_MONTH);
time->tm_year = CMOS_READ(RTC_YEAR);
#ifdef CONFIG_MACH_DECSTATION
real_year = CMOS_READ(RTC_DEC_YEAR);
#endif
ctrl = CMOS_READ(RTC_CONTROL);
spin_unlock_irq(&rtc_lock);
if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
{
BCD_TO_BIN(time->tm_sec);
BCD_TO_BIN(time->tm_min);
BCD_TO_BIN(time->tm_hour);
BCD_TO_BIN(time->tm_mday);
BCD_TO_BIN(time->tm_mon);
BCD_TO_BIN(time->tm_year);
}
#ifdef CONFIG_MACH_DECSTATION
time->tm_year += real_year - 72;
#endif
/*
* Account for differences between how the RTC uses the values
* and how they are defined in a struct rtc_time.
*/
if (time->tm_year <= 69)
time->tm_year += 100;
time->tm_mon--;
return RTC_24H;
}
/* Set the current date and time in the real time clock. */
static inline int set_rtc_time(struct rtc_time *time)
{
unsigned char mon, day, hrs, min, sec;
unsigned char save_control, save_freq_select;
unsigned int yrs;
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_yrs, leap_yr;
#endif
yrs = time->tm_year;
mon = time->tm_mon + 1; /* tm_mon starts at zero */
day = time->tm_mday;
hrs = time->tm_hour;
min = time->tm_min;
sec = time->tm_sec;
if (yrs > 255) /* They are unsigned */
return -EINVAL;
spin_lock_irq(&rtc_lock);
#ifdef CONFIG_MACH_DECSTATION
real_yrs = yrs;
leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) ||
!((yrs + 1900) % 400));
yrs = 72;
/*
* We want to keep the year set to 73 until March
* for non-leap years, so that Feb, 29th is handled
* correctly.
*/
if (!leap_yr && mon < 3) {
real_yrs--;
yrs = 73;
}
#endif
/* These limits and adjustments are independent of
* whether the chip is in binary mode or not.
*/
if (yrs > 169) {
spin_unlock_irq(&rtc_lock);
return -EINVAL;
}
if (yrs >= 100)
yrs -= 100;
if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
|| RTC_ALWAYS_BCD) {
BIN_TO_BCD(sec);
BIN_TO_BCD(min);
BIN_TO_BCD(hrs);
BIN_TO_BCD(day);
BIN_TO_BCD(mon);
BIN_TO_BCD(yrs);
}
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
#ifdef CONFIG_MACH_DECSTATION
CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
#endif
CMOS_WRITE(yrs, RTC_YEAR);
CMOS_WRITE(mon, RTC_MONTH);
CMOS_WRITE(day, RTC_DAY_OF_MONTH);
CMOS_WRITE(hrs, RTC_HOURS);
CMOS_WRITE(min, RTC_MINUTES);
CMOS_WRITE(sec, RTC_SECONDS);
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
spin_unlock_irq(&rtc_lock);
return 0;
}
static inline unsigned int get_rtc_ss(void)
{
struct rtc_time h;
get_rtc_time(&h);
return h.tm_sec;
}
static inline int get_rtc_pll(struct rtc_pll_info *pll)
{
return -EINVAL;
}
static inline int set_rtc_pll(struct rtc_pll_info *pll)
{
return -EINVAL;
}
#endif /* __KERNEL__ */
#endif /* __ASM_RTC_H__ */
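A sketch of a caller, printing the time read back from CMOS; the 1900-based year offset follows the adjustment done in get_rtc_time() above:

static void show_rtc(void)
{
	struct rtc_time tm;

	get_rtc_time(&tm);
	printk("RTC: %04d-%02d-%02d %02d:%02d:%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
}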


@@ -0,0 +1,12 @@
#ifndef _ASM_GENERIC_SECTIONS_H_
#define _ASM_GENERIC_SECTIONS_H_
/* References to section boundaries */
extern char _text[], _stext[], _etext[];
extern char _data[], _sdata[], _edata[];
extern char __bss_start[], __bss_stop[];
extern char __init_begin[], __init_end[];
extern char _sinittext[], _einittext[];
#endif /* _ASM_GENERIC_SECTIONS_H_ */


@@ -0,0 +1,295 @@
#ifndef _ASM_GENERIC_SIGINFO_H
#define _ASM_GENERIC_SIGINFO_H
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/resource.h>
typedef union sigval {
int sival_int;
void __user *sival_ptr;
} sigval_t;
/*
* This is the size (including padding) of the part of the
* struct siginfo that is before the union.
*/
#ifndef __ARCH_SI_PREAMBLE_SIZE
#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int))
#endif
#define SI_MAX_SIZE 128
#ifndef SI_PAD_SIZE
#define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int))
#endif
#ifndef __ARCH_SI_UID_T
#define __ARCH_SI_UID_T uid_t
#endif
/*
* The default "si_band" type is "long", as specified by POSIX.
* However, some architectures want to override this to "int"
* for historical compatibility reasons, so we allow that.
*/
#ifndef __ARCH_SI_BAND_T
#define __ARCH_SI_BAND_T long
#endif
#ifndef HAVE_ARCH_SIGINFO_T
typedef struct siginfo {
int si_signo;
int si_errno;
int si_code;
union {
int _pad[SI_PAD_SIZE];
/* kill() */
struct {
pid_t _pid; /* sender's pid */
__ARCH_SI_UID_T _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
struct {
timer_t _tid; /* timer id */
int _overrun; /* overrun count */
char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
sigval_t _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
} _timer;
/* POSIX.1b signals */
struct {
pid_t _pid; /* sender's pid */
__ARCH_SI_UID_T _uid; /* sender's uid */
sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
pid_t _pid; /* which child */
__ARCH_SI_UID_T _uid; /* sender's uid */
int _status; /* exit code */
clock_t _utime;
clock_t _stime;
} _sigchld;
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
struct {
void __user *_addr; /* faulting insn/memory ref. */
#ifdef __ARCH_SI_TRAPNO
int _trapno; /* TRAP # which caused the signal */
#endif
} _sigfault;
/* SIGPOLL */
struct {
__ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
int _fd;
} _sigpoll;
} _sifields;
} siginfo_t;
#endif
/*
* How these fields are to be accessed.
*/
#define si_pid _sifields._kill._pid
#define si_uid _sifields._kill._uid
#define si_tid _sifields._timer._tid
#define si_overrun _sifields._timer._overrun
#define si_sys_private _sifields._timer._sys_private
#define si_status _sifields._sigchld._status
#define si_utime _sifields._sigchld._utime
#define si_stime _sifields._sigchld._stime
#define si_value _sifields._rt._sigval
#define si_int _sifields._rt._sigval.sival_int
#define si_ptr _sifields._rt._sigval.sival_ptr
#define si_addr _sifields._sigfault._addr
#ifdef __ARCH_SI_TRAPNO
#define si_trapno _sifields._sigfault._trapno
#endif
#define si_band _sifields._sigpoll._band
#define si_fd _sifields._sigpoll._fd
#ifdef __KERNEL__
#define __SI_MASK 0xffff0000u
#define __SI_KILL (0 << 16)
#define __SI_TIMER (1 << 16)
#define __SI_POLL (2 << 16)
#define __SI_FAULT (3 << 16)
#define __SI_CHLD (4 << 16)
#define __SI_RT (5 << 16)
#define __SI_MESGQ (6 << 16)
#define __SI_CODE(T,N) ((T) | ((N) & 0xffff))
#else
#define __SI_KILL 0
#define __SI_TIMER 0
#define __SI_POLL 0
#define __SI_FAULT 0
#define __SI_CHLD 0
#define __SI_RT 0
#define __SI_MESGQ 0
#define __SI_CODE(T,N) (N)
#endif
/*
* si_code values
* Digital reserves positive values for kernel-generated signals.
*/
#define SI_USER 0 /* sent by kill, sigsend, raise */
#define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
#define SI_QUEUE -1 /* sent by sigqueue */
#define SI_TIMER __SI_CODE(__SI_TIMER,-2) /* sent by timer expiration */
#define SI_MESGQ __SI_CODE(__SI_MESGQ,-3) /* sent by real time mesq state change */
#define SI_ASYNCIO -4 /* sent by AIO completion */
#define SI_SIGIO -5 /* sent by queued SIGIO */
#define SI_TKILL -6 /* sent by tkill system call */
#define SI_DETHREAD -7 /* sent by execve() killing subsidiary threads */
#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0)
#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
#ifndef HAVE_ARCH_SI_CODES
/*
* SIGILL si_codes
*/
#define ILL_ILLOPC (__SI_FAULT|1) /* illegal opcode */
#define ILL_ILLOPN (__SI_FAULT|2) /* illegal operand */
#define ILL_ILLADR (__SI_FAULT|3) /* illegal addressing mode */
#define ILL_ILLTRP (__SI_FAULT|4) /* illegal trap */
#define ILL_PRVOPC (__SI_FAULT|5) /* privileged opcode */
#define ILL_PRVREG (__SI_FAULT|6) /* privileged register */
#define ILL_COPROC (__SI_FAULT|7) /* coprocessor error */
#define ILL_BADSTK (__SI_FAULT|8) /* internal stack error */
#define NSIGILL 8
/*
* SIGFPE si_codes
*/
#define FPE_INTDIV (__SI_FAULT|1) /* integer divide by zero */
#define FPE_INTOVF (__SI_FAULT|2) /* integer overflow */
#define FPE_FLTDIV (__SI_FAULT|3) /* floating point divide by zero */
#define FPE_FLTOVF (__SI_FAULT|4) /* floating point overflow */
#define FPE_FLTUND (__SI_FAULT|5) /* floating point underflow */
#define FPE_FLTRES (__SI_FAULT|6) /* floating point inexact result */
#define FPE_FLTINV (__SI_FAULT|7) /* floating point invalid operation */
#define FPE_FLTSUB (__SI_FAULT|8) /* subscript out of range */
#define NSIGFPE 8
/*
* SIGSEGV si_codes
*/
#define SEGV_MAPERR (__SI_FAULT|1) /* address not mapped to object */
#define SEGV_ACCERR (__SI_FAULT|2) /* invalid permissions for mapped object */
#define NSIGSEGV 2
/*
* SIGBUS si_codes
*/
#define BUS_ADRALN (__SI_FAULT|1) /* invalid address alignment */
#define BUS_ADRERR (__SI_FAULT|2) /* non-existent physical address */
#define BUS_OBJERR (__SI_FAULT|3) /* object specific hardware error */
#define NSIGBUS 3
/*
* SIGTRAP si_codes
*/
#define TRAP_BRKPT (__SI_FAULT|1) /* process breakpoint */
#define TRAP_TRACE (__SI_FAULT|2) /* process trace trap */
#define NSIGTRAP 2
/*
* SIGCHLD si_codes
*/
#define CLD_EXITED (__SI_CHLD|1) /* child has exited */
#define CLD_KILLED (__SI_CHLD|2) /* child was killed */
#define CLD_DUMPED (__SI_CHLD|3) /* child terminated abnormally */
#define CLD_TRAPPED (__SI_CHLD|4) /* traced child has trapped */
#define CLD_STOPPED (__SI_CHLD|5) /* child has stopped */
#define CLD_CONTINUED (__SI_CHLD|6) /* stopped child has continued */
#define NSIGCHLD 6
/*
* SIGPOLL si_codes
*/
#define POLL_IN (__SI_POLL|1) /* data input available */
#define POLL_OUT (__SI_POLL|2) /* output buffers available */
#define POLL_MSG (__SI_POLL|3) /* input message available */
#define POLL_ERR (__SI_POLL|4) /* i/o error */
#define POLL_PRI (__SI_POLL|5) /* high priority input available */
#define POLL_HUP (__SI_POLL|6) /* device disconnected */
#define NSIGPOLL 6
#endif
/*
* sigevent definitions
*
* It seems likely that SIGEV_THREAD will have to be handled from
* userspace, libpthread transmuting it to SIGEV_SIGNAL, which the
* thread manager then catches and does the appropriate nonsense.
* However, everything is written out here so as to not get lost.
*/
#define SIGEV_SIGNAL 0 /* notify via signal */
#define SIGEV_NONE 1 /* other notification: meaningless */
#define SIGEV_THREAD 2 /* deliver via thread creation */
#define SIGEV_THREAD_ID 4 /* deliver to thread */
#define SIGEV_MAX_SIZE 64
#ifndef SIGEV_PAD_SIZE
#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 3)
#endif
#ifndef HAVE_ARCH_SIGEVENT_T
typedef struct sigevent {
sigval_t sigev_value;
int sigev_signo;
int sigev_notify;
union {
int _pad[SIGEV_PAD_SIZE];
int _tid;
struct {
void (*_function)(sigval_t);
void *_attribute; /* really pthread_attr_t */
} _sigev_thread;
} _sigev_un;
} sigevent_t;
#endif
#define sigev_notify_function _sigev_un._sigev_thread._function
#define sigev_notify_attributes _sigev_un._sigev_thread._attribute
#define sigev_notify_thread_id _sigev_un._tid
#ifdef __KERNEL__
struct siginfo;
void do_schedule_next_timer(struct siginfo *info);
#ifndef HAVE_ARCH_COPY_SIGINFO
#include <linux/string.h>
static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
{
if (from->si_code < 0)
memcpy(to, from, sizeof(*to));
else
/* _sigchld is currently the largest known union member */
memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
}
#endif
extern int copy_siginfo_to_user(struct siginfo __user *to, struct siginfo *from);
#endif /* __KERNEL__ */
#endif


@@ -0,0 +1,51 @@
#ifndef _GENERIC_STATFS_H
#define _GENERIC_STATFS_H
#ifndef __KERNEL_STRICT_NAMES
# include <linux/types.h>
typedef __kernel_fsid_t fsid_t;
#endif
struct statfs {
__u32 f_type;
__u32 f_bsize;
__u32 f_blocks;
__u32 f_bfree;
__u32 f_bavail;
__u32 f_files;
__u32 f_ffree;
__kernel_fsid_t f_fsid;
__u32 f_namelen;
__u32 f_frsize;
__u32 f_spare[5];
};
struct statfs64 {
__u32 f_type;
__u32 f_bsize;
__u64 f_blocks;
__u64 f_bfree;
__u64 f_bavail;
__u64 f_files;
__u64 f_ffree;
__kernel_fsid_t f_fsid;
__u32 f_namelen;
__u32 f_frsize;
__u32 f_spare[5];
};
struct compat_statfs64 {
__u32 f_type;
__u32 f_bsize;
__u64 f_blocks;
__u64 f_bfree;
__u64 f_bavail;
__u64 f_files;
__u64 f_ffree;
__kernel_fsid_t f_fsid;
__u32 f_namelen;
__u32 f_frsize;
__u32 f_spare[5];
};
#endif


@@ -0,0 +1,152 @@
/* asm-generic/tlb.h
*
* Generic TLB shootdown code
*
* Copyright 2001 Red Hat, Inc.
* Based on code from mm/memory.c Copyright Linus Torvalds and others.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H
#include <linux/config.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
/*
* For UP we don't need to worry about TLB flush
* and page free order so much..
*/
#ifdef CONFIG_SMP
#define FREE_PTE_NR 506
#define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
#define FREE_PTE_NR 1
#define tlb_fast_mode(tlb) 1
#endif
/* struct mmu_gather is an opaque type used by the mm code for passing around
* any data needed by arch specific code for tlb_remove_page. This structure
* can be per-CPU or per-MM as the page table lock is held for the duration of
* TLB shootdown.
*/
struct mmu_gather {
struct mm_struct *mm;
unsigned int nr; /* set to ~0U means fast mode */
unsigned int need_flush;/* Really unmapped some ptes? */
unsigned int fullmm; /* non-zero means full mm flush */
unsigned long freed;
struct page * pages[FREE_PTE_NR];
};
/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
/* tlb_gather_mmu
* Return a pointer to an initialized struct mmu_gather.
*/
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
struct mmu_gather *tlb = &per_cpu(mmu_gathers, smp_processor_id());
tlb->mm = mm;
/* Use fast mode if only one CPU is online */
tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;
tlb->fullmm = full_mm_flush;
tlb->freed = 0;
return tlb;
}
static inline void
tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
if (!tlb->need_flush)
return;
tlb->need_flush = 0;
tlb_flush(tlb);
if (!tlb_fast_mode(tlb)) {
free_pages_and_swap_cache(tlb->pages, tlb->nr);
tlb->nr = 0;
}
}
/* tlb_finish_mmu
* Called at the end of the shootdown operation to free up any resources
* that were required. The page table lock is still held at this point.
*/
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
int freed = tlb->freed;
struct mm_struct *mm = tlb->mm;
int rss = mm->rss;
if (rss < freed)
freed = rss;
mm->rss = rss - freed;
tlb_flush_mmu(tlb, start, end);
/* keep the page table cache within bounds */
check_pgt_cache();
}
static inline unsigned int
tlb_is_full_mm(struct mmu_gather *tlb)
{
return tlb->fullmm;
}
/* tlb_remove_page
* Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
* handling the additional races in SMP caused by other CPUs caching valid
* mappings in their TLBs.
*/
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
tlb->need_flush = 1;
if (tlb_fast_mode(tlb)) {
free_page_and_swap_cache(page);
return;
}
tlb->pages[tlb->nr++] = page;
if (tlb->nr >= FREE_PTE_NR)
tlb_flush_mmu(tlb, 0, 0);
}
/**
* tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
*
* Record the fact that PTEs were really unmapped in ->need_flush, so we can
* later optimise away the tlb invalidate. This helps when userspace is
* unmapping already-unmapped pages, which happens quite a lot.
*/
#define tlb_remove_tlb_entry(tlb, ptep, address) \
do { \
tlb->need_flush = 1; \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
#define pte_free_tlb(tlb, ptep) \
do { \
tlb->need_flush = 1; \
__pte_free_tlb(tlb, ptep); \
} while (0)
#define pmd_free_tlb(tlb, pmdp) \
do { \
tlb->need_flush = 1; \
__pmd_free_tlb(tlb, pmdp); \
} while (0)
#define tlb_migrate_finish(mm) do {} while (0)
#endif /* _ASM_GENERIC__TLB_H */
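The shootdown sequence the mm code drives, compressed into a sketch; locking and the real page-table walk are omitted, and the function is invented for illustration:

static void unmap_one(struct vm_area_struct *vma, unsigned long addr,
		      pte_t *ptep, struct page *page)
{
	struct mmu_gather *tlb = tlb_gather_mmu(vma->vm_mm, 0);

	tlb_remove_tlb_entry(tlb, ptep, addr);	/* mark PTE for invalidate */
	tlb_remove_page(tlb, page);		/* batch (or free) the page */
	tlb_finish_mmu(tlb, addr, addr + PAGE_SIZE);
}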


@@ -0,0 +1,48 @@
/*
* linux/include/asm-generic/topology.h
*
* Written by: Matthew Dobson, IBM Corporation
*
* Copyright (C) 2002, IBM Corp.
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to <colpatch@us.ibm.com>
*/
#ifndef _ASM_GENERIC_TOPOLOGY_H
#define _ASM_GENERIC_TOPOLOGY_H
/* Other architectures wishing to use this simple topology API should fill
in the below functions as appropriate in their own <asm/topology.h> file. */
#ifndef cpu_to_node
#define cpu_to_node(cpu) (0)
#endif
#ifndef parent_node
#define parent_node(node) (0)
#endif
#ifndef node_to_cpumask
#define node_to_cpumask(node) (cpu_online_map)
#endif
#ifndef node_to_first_cpu
#define node_to_first_cpu(node) (0)
#endif
#ifndef pcibus_to_cpumask
#define pcibus_to_cpumask(bus) (cpu_online_map)
#endif
#endif /* _ASM_GENERIC_TOPOLOGY_H */


@@ -0,0 +1,26 @@
#ifndef _ASM_GENERIC_UACCESS_H_
#define _ASM_GENERIC_UACCESS_H_
/*
* This macro should be used instead of __get_user() when accessing
* values at locations that are not known to be aligned.
*/
#define __get_user_unaligned(x, ptr) \
({ \
__typeof__ (*(ptr)) __x; \
int __gu_err = __copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \
(x) = __x; \
__gu_err; \
})
/*
* This macro should be used instead of __put_user() when accessing
* values at locations that are not known to be aligned.
*/
#define __put_user_unaligned(x, ptr) \
({ \
__typeof__ (*(ptr)) __x = (x); \
__copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \
})
#endif /* _ASM_GENERIC_UACCESS_H_ */
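A sketch of a caller, assuming the macro evaluates to 0 or -EFAULT like __put_user_unaligned(); the helper name is invented:

static int read_user_word(u32 __user *uptr, u32 *out)
{
	return __get_user_unaligned(*out, uptr);	/* 0 or -EFAULT */
}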


@@ -0,0 +1,20 @@
#ifndef _ASM_GENERIC_UNALIGNED_H_
#define _ASM_GENERIC_UNALIGNED_H_
/*
* For the benefit of those who are trying to port Linux to another
* architecture, here are some C-language equivalents.
*/
#include <asm/string.h>
#define get_unaligned(ptr) \
({ __typeof__(*(ptr)) __tmp; memcpy(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
#define put_unaligned(val, ptr) \
({ __typeof__(*(ptr)) __tmp = (val); \
memcpy((ptr), &__tmp, sizeof(*(ptr))); \
(void)0; })
#endif /* _ASM_GENERIC_UNALIGNED_H_ */
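A sketch of pulling a 32-bit field from a packed wire-format buffer; the field offset is illustrative:

static u32 frame_len(u8 *pkt)
{
	/* pkt + 1 may be misaligned; a plain load could trap on some CPUs. */
	return get_unaligned((u32 *)(pkt + 1));
}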


@@ -0,0 +1,90 @@
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif
#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif
#define RODATA \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
*(.rodata) *(.rodata.*) \
*(__vermagic) /* Kernel version magic */ \
} \
\
.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
*(.rodata1) \
} \
\
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
*(.pci_fixup_early) \
VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
*(.pci_fixup_header) \
VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
*(.pci_fixup_final) \
VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
*(.pci_fixup_enable) \
VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
*(__ksymtab) \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
*(__ksymtab_gpl) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab) = .; \
*(__kcrctab) \
VMLINUX_SYMBOL(__stop___kcrctab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
*(__kcrctab_gpl) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
} \
\
/* Kernel symbol table: strings */ \
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
*(__ksymtab_strings) \
} \
\
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___param) = .; \
*(__param) \
VMLINUX_SYMBOL(__stop___param) = .; \
}
#define SECURITY_INIT \
.security_initcall.init : { \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
*(.security_initcall.init) \
VMLINUX_SYMBOL(__security_initcall_end) = .; \
}
#define SCHED_TEXT \
VMLINUX_SYMBOL(__sched_text_start) = .; \
*(.sched.text) \
VMLINUX_SYMBOL(__sched_text_end) = .;
#define LOCK_TEXT \
VMLINUX_SYMBOL(__lock_text_start) = .; \
*(.spinlock.text) \
VMLINUX_SYMBOL(__lock_text_end) = .;


@@ -0,0 +1,718 @@
/*
* include/asm-generic/xor.h
*
* Generic optimized RAID-5 checksumming functions.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* You should have received a copy of the GNU General Public License
* (for example /usr/src/linux/COPYING); if not, write to the Free
* Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <asm/processor.h>
static void
xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
long lines = bytes / (sizeof (long)) / 8;
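	/* Each iteration XORs one 8-long block; callers must pass a byte
	 * count that is a non-zero multiple of 8 * sizeof(long), since the
	 * do-while always runs at least once and any remainder is skipped. */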
do {
p1[0] ^= p2[0];
p1[1] ^= p2[1];
p1[2] ^= p2[2];
p1[3] ^= p2[3];
p1[4] ^= p2[4];
p1[5] ^= p2[5];
p1[6] ^= p2[6];
p1[7] ^= p2[7];
p1 += 8;
p2 += 8;
} while (--lines > 0);
}
static void
xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3)
{
long lines = bytes / (sizeof (long)) / 8;
do {
p1[0] ^= p2[0] ^ p3[0];
p1[1] ^= p2[1] ^ p3[1];
p1[2] ^= p2[2] ^ p3[2];
p1[3] ^= p2[3] ^ p3[3];
p1[4] ^= p2[4] ^ p3[4];
p1[5] ^= p2[5] ^ p3[5];
p1[6] ^= p2[6] ^ p3[6];
p1[7] ^= p2[7] ^ p3[7];
p1 += 8;
p2 += 8;
p3 += 8;
} while (--lines > 0);
}
static void
xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4)
{
long lines = bytes / (sizeof (long)) / 8;
do {
p1[0] ^= p2[0] ^ p3[0] ^ p4[0];
p1[1] ^= p2[1] ^ p3[1] ^ p4[1];
p1[2] ^= p2[2] ^ p3[2] ^ p4[2];
p1[3] ^= p2[3] ^ p3[3] ^ p4[3];
p1[4] ^= p2[4] ^ p3[4] ^ p4[4];
p1[5] ^= p2[5] ^ p3[5] ^ p4[5];
p1[6] ^= p2[6] ^ p3[6] ^ p4[6];
p1[7] ^= p2[7] ^ p3[7] ^ p4[7];
p1 += 8;
p2 += 8;
p3 += 8;
p4 += 8;
} while (--lines > 0);
}
static void
xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
long lines = bytes / (sizeof (long)) / 8;
do {
p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0];
p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1];
p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2];
p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3];
p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4];
p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5];
p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6];
p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7];
p1 += 8;
p2 += 8;
p3 += 8;
p4 += 8;
p5 += 8;
} while (--lines > 0);
}
static void
xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
long lines = bytes / (sizeof (long)) / 8;
do {
register long d0, d1, d2, d3, d4, d5, d6, d7;
d0 = p1[0]; /* Pull the stuff into registers */
d1 = p1[1]; /* ... in bursts, if possible. */
d2 = p1[2];
d3 = p1[3];
d4 = p1[4];
d5 = p1[5];
d6 = p1[6];
d7 = p1[7];
d0 ^= p2[0];
d1 ^= p2[1];
d2 ^= p2[2];
d3 ^= p2[3];
d4 ^= p2[4];
d5 ^= p2[5];
d6 ^= p2[6];
d7 ^= p2[7];
p1[0] = d0; /* Store the result (in bursts) */
p1[1] = d1;
p1[2] = d2;
p1[3] = d3;
p1[4] = d4;
p1[5] = d5;
p1[6] = d6;
p1[7] = d7;
p1 += 8;
p2 += 8;
} while (--lines > 0);
}
static void
xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3)
{
long lines = bytes / (sizeof (long)) / 8;
do {
register long d0, d1, d2, d3, d4, d5, d6, d7;
d0 = p1[0]; /* Pull the stuff into registers */
d1 = p1[1]; /* ... in bursts, if possible. */
d2 = p1[2];
d3 = p1[3];
d4 = p1[4];
d5 = p1[5];
d6 = p1[6];
d7 = p1[7];
d0 ^= p2[0];
d1 ^= p2[1];
d2 ^= p2[2];
d3 ^= p2[3];
d4 ^= p2[4];
d5 ^= p2[5];
d6 ^= p2[6];
d7 ^= p2[7];
d0 ^= p3[0];
d1 ^= p3[1];
d2 ^= p3[2];
d3 ^= p3[3];
d4 ^= p3[4];
d5 ^= p3[5];
d6 ^= p3[6];
d7 ^= p3[7];
p1[0] = d0; /* Store the result (in bursts) */
p1[1] = d1;
p1[2] = d2;
p1[3] = d3;
p1[4] = d4;
p1[5] = d5;
p1[6] = d6;
p1[7] = d7;
p1 += 8;
p2 += 8;
p3 += 8;
} while (--lines > 0);
}
static void
xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4)
{
long lines = bytes / (sizeof (long)) / 8;
do {
register long d0, d1, d2, d3, d4, d5, d6, d7;
d0 = p1[0]; /* Pull the stuff into registers */
d1 = p1[1]; /* ... in bursts, if possible. */
d2 = p1[2];
d3 = p1[3];
d4 = p1[4];
d5 = p1[5];
d6 = p1[6];
d7 = p1[7];
d0 ^= p2[0];
d1 ^= p2[1];
d2 ^= p2[2];
d3 ^= p2[3];
d4 ^= p2[4];
d5 ^= p2[5];
d6 ^= p2[6];
d7 ^= p2[7];
d0 ^= p3[0];
d1 ^= p3[1];
d2 ^= p3[2];
d3 ^= p3[3];
d4 ^= p3[4];
d5 ^= p3[5];
d6 ^= p3[6];
d7 ^= p3[7];
d0 ^= p4[0];
d1 ^= p4[1];
d2 ^= p4[2];
d3 ^= p4[3];
d4 ^= p4[4];
d5 ^= p4[5];
d6 ^= p4[6];
d7 ^= p4[7];
p1[0] = d0; /* Store the result (in bursts) */
p1[1] = d1;
p1[2] = d2;
p1[3] = d3;
p1[4] = d4;
p1[5] = d5;
p1[6] = d6;
p1[7] = d7;
p1 += 8;
p2 += 8;
p3 += 8;
p4 += 8;
} while (--lines > 0);
}
static void
xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
long lines = bytes / (sizeof (long)) / 8;
do {
register long d0, d1, d2, d3, d4, d5, d6, d7;
d0 = p1[0]; /* Pull the stuff into registers */
d1 = p1[1]; /* ... in bursts, if possible. */
d2 = p1[2];
d3 = p1[3];
d4 = p1[4];
d5 = p1[5];
d6 = p1[6];
d7 = p1[7];
d0 ^= p2[0];
d1 ^= p2[1];
d2 ^= p2[2];
d3 ^= p2[3];
d4 ^= p2[4];
d5 ^= p2[5];
d6 ^= p2[6];
d7 ^= p2[7];
d0 ^= p3[0];
d1 ^= p3[1];
d2 ^= p3[2];
d3 ^= p3[3];
d4 ^= p3[4];
d5 ^= p3[5];
d6 ^= p3[6];
d7 ^= p3[7];
d0 ^= p4[0];
d1 ^= p4[1];
d2 ^= p4[2];
d3 ^= p4[3];
d4 ^= p4[4];
d5 ^= p4[5];
d6 ^= p4[6];
d7 ^= p4[7];
d0 ^= p5[0];
d1 ^= p5[1];
d2 ^= p5[2];
d3 ^= p5[3];
d4 ^= p5[4];
d5 ^= p5[5];
d6 ^= p5[6];
d7 ^= p5[7];
p1[0] = d0; /* Store the result (in bursts) */
p1[1] = d1;
p1[2] = d2;
p1[3] = d3;
p1[4] = d4;
p1[5] = d5;
p1[6] = d6;
p1[7] = d7;
p1 += 8;
p2 += 8;
p3 += 8;
p4 += 8;
p5 += 8;
} while (--lines > 0);
}
static void
xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
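	/* All but the final 8-long block go through the loop, which
	 * prefetches the following block on each pass; the final block is
	 * XOR'd by jumping back to once_more: below, so no prefetch for a
	 * (nonexistent) next block is issued on its behalf. */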
prefetchw(p1);
prefetch(p2);
do {
prefetchw(p1+8);
prefetch(p2+8);
once_more:
p1[0] ^= p2[0];
p1[1] ^= p2[1];
p1[2] ^= p2[2];
p1[3] ^= p2[3];
p1[4] ^= p2[4];
p1[5] ^= p2[5];
p1[6] ^= p2[6];
p1[7] ^= p2[7];
p1 += 8;
p2 += 8;
} while (--lines > 0);
if (lines == 0)
goto once_more;
}
static void
xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
prefetchw(p1);
prefetch(p2);
prefetch(p3);
do {
prefetchw(p1+8);
prefetch(p2+8);
prefetch(p3+8);
once_more:
p1[0] ^= p2[0] ^ p3[0];
p1[1] ^= p2[1] ^ p3[1];
p1[2] ^= p2[2] ^ p3[2];
p1[3] ^= p2[3] ^ p3[3];
p1[4] ^= p2[4] ^ p3[4];
p1[5] ^= p2[5] ^ p3[5];
p1[6] ^= p2[6] ^ p3[6];
p1[7] ^= p2[7] ^ p3[7];
p1 += 8;
p2 += 8;
p3 += 8;
} while (--lines > 0);
if (lines == 0)
goto once_more;
}
static void
xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
prefetchw(p1);
prefetch(p2);
prefetch(p3);
prefetch(p4);
do {
prefetchw(p1+8);
prefetch(p2+8);
prefetch(p3+8);
prefetch(p4+8);
once_more:
p1[0] ^= p2[0] ^ p3[0] ^ p4[0];
p1[1] ^= p2[1] ^ p3[1] ^ p4[1];
p1[2] ^= p2[2] ^ p3[2] ^ p4[2];
p1[3] ^= p2[3] ^ p3[3] ^ p4[3];
p1[4] ^= p2[4] ^ p3[4] ^ p4[4];
p1[5] ^= p2[5] ^ p3[5] ^ p4[5];
p1[6] ^= p2[6] ^ p3[6] ^ p4[6];
p1[7] ^= p2[7] ^ p3[7] ^ p4[7];
p1 += 8;
p2 += 8;
p3 += 8;
p4 += 8;
} while (--lines > 0);
if (lines == 0)
goto once_more;
}
static void
xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
prefetchw(p1);
prefetch(p2);
prefetch(p3);
prefetch(p4);
prefetch(p5);
do {
prefetchw(p1+8);
prefetch(p2+8);
prefetch(p3+8);
prefetch(p4+8);
prefetch(p5+8);
once_more:
p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0];
p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1];
p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2];
p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3];
p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4];
p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5];
p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6];
p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7];
p1 += 8;
p2 += 8;
p3 += 8;
p4 += 8;
p5 += 8;
} while (--lines > 0);
if (lines == 0)
goto once_more;
}
static void
xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
prefetchw(p1);
prefetch(p2);
do {
register long d0, d1, d2, d3, d4, d5, d6, d7;
prefetchw(p1+8);
prefetch(p2+8);
once_more:
d0 = p1[0]; /* Pull the stuff into registers */
d1 = p1[1]; /* ... in bursts, if possible. */
d2 = p1[2];
d3 = p1[3];
d4 = p1[4];
d5 = p1[5];
d6 = p1[6];
d7 = p1[7];
d0 ^= p2[0];
d1 ^= p2[1];
d2 ^= p2[2];
d3 ^= p2[3];
d4 ^= p2[4];
d5 ^= p2[5];
d6 ^= p2[6];
d7 ^= p2[7];
p1[0] = d0; /* Store the result (in bursts) */
p1[1] = d1;
p1[2] = d2;
p1[3] = d3;
p1[4] = d4;
p1[5] = d5;
p1[6] = d6;
p1[7] = d7;
p1 += 8;
p2 += 8;
} while (--lines > 0);
if (lines == 0)
goto once_more;
}
static void
xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
prefetchw(p1);
prefetch(p2);
prefetch(p3);
do {
register long d0, d1, d2, d3, d4, d5, d6, d7;
prefetchw(p1+8);
prefetch(p2+8);
prefetch(p3+8);
once_more:
d0 = p1[0]; /* Pull the stuff into registers */
d1 = p1[1]; /* ... in bursts, if possible. */
d2 = p1[2];
d3 = p1[3];
d4 = p1[4];
d5 = p1[5];
d6 = p1[6];
d7 = p1[7];
d0 ^= p2[0];
d1 ^= p2[1];
d2 ^= p2[2];
d3 ^= p2[3];
d4 ^= p2[4];
d5 ^= p2[5];
d6 ^= p2[6];
d7 ^= p2[7];
d0 ^= p3[0];
d1 ^= p3[1];
d2 ^= p3[2];
d3 ^= p3[3];
d4 ^= p3[4];
d5 ^= p3[5];
d6 ^= p3[6];
d7 ^= p3[7];
p1[0] = d0; /* Store the result (in bursts) */
p1[1] = d1;
p1[2] = d2;
p1[3] = d3;
p1[4] = d4;
p1[5] = d5;
p1[6] = d6;
p1[7] = d7;
p1 += 8;
p2 += 8;
p3 += 8;
} while (--lines > 0);
if (lines == 0)
goto once_more;
}
static void
xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
prefetchw(p1);
prefetch(p2);
prefetch(p3);
prefetch(p4);
do {
register long d0, d1, d2, d3, d4, d5, d6, d7;
prefetchw(p1+8);
prefetch(p2+8);
prefetch(p3+8);
prefetch(p4+8);
once_more:
d0 = p1[0]; /* Pull the stuff into registers */
d1 = p1[1]; /* ... in bursts, if possible. */
d2 = p1[2];
d3 = p1[3];
d4 = p1[4];
d5 = p1[5];
d6 = p1[6];
d7 = p1[7];
d0 ^= p2[0];
d1 ^= p2[1];
d2 ^= p2[2];
d3 ^= p2[3];
d4 ^= p2[4];
d5 ^= p2[5];
d6 ^= p2[6];
d7 ^= p2[7];
d0 ^= p3[0];
d1 ^= p3[1];
d2 ^= p3[2];
d3 ^= p3[3];
d4 ^= p3[4];
d5 ^= p3[5];
d6 ^= p3[6];
d7 ^= p3[7];
d0 ^= p4[0];
d1 ^= p4[1];
d2 ^= p4[2];
d3 ^= p4[3];
d4 ^= p4[4];
d5 ^= p4[5];
d6 ^= p4[6];
d7 ^= p4[7];
p1[0] = d0; /* Store the result (in bursts) */
p1[1] = d1;
p1[2] = d2;
p1[3] = d3;
p1[4] = d4;
p1[5] = d5;
p1[6] = d6;
p1[7] = d7;
p1 += 8;
p2 += 8;
p3 += 8;
p4 += 8;
} while (--lines > 0);
if (lines == 0)
goto once_more;
}
static void
xor_32regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
prefetchw(p1);
prefetch(p2);
prefetch(p3);
prefetch(p4);
prefetch(p5);
do {
register long d0, d1, d2, d3, d4, d5, d6, d7;
prefetchw(p1+8);
prefetch(p2+8);
prefetch(p3+8);
prefetch(p4+8);
prefetch(p5+8);
once_more:
d0 = p1[0]; /* Pull the stuff into registers */
d1 = p1[1]; /* ... in bursts, if possible. */
d2 = p1[2];
d3 = p1[3];
d4 = p1[4];
d5 = p1[5];
d6 = p1[6];
d7 = p1[7];
d0 ^= p2[0];
d1 ^= p2[1];
d2 ^= p2[2];
d3 ^= p2[3];
d4 ^= p2[4];
d5 ^= p2[5];
d6 ^= p2[6];
d7 ^= p2[7];
d0 ^= p3[0];
d1 ^= p3[1];
d2 ^= p3[2];
d3 ^= p3[3];
d4 ^= p3[4];
d5 ^= p3[5];
d6 ^= p3[6];
d7 ^= p3[7];
d0 ^= p4[0];
d1 ^= p4[1];
d2 ^= p4[2];
d3 ^= p4[3];
d4 ^= p4[4];
d5 ^= p4[5];
d6 ^= p4[6];
d7 ^= p4[7];
d0 ^= p5[0];
d1 ^= p5[1];
d2 ^= p5[2];
d3 ^= p5[3];
d4 ^= p5[4];
d5 ^= p5[5];
d6 ^= p5[6];
d7 ^= p5[7];
p1[0] = d0; /* Store the result (in bursts) */
p1[1] = d1;
p1[2] = d2;
p1[3] = d3;
p1[4] = d4;
p1[5] = d5;
p1[6] = d6;
p1[7] = d7;
p1 += 8;
p2 += 8;
p3 += 8;
p4 += 8;
p5 += 8;
} while (--lines > 0);
if (lines == 0)
goto once_more;
}
static struct xor_block_template xor_block_8regs = {
.name = "8regs",
.do_2 = xor_8regs_2,
.do_3 = xor_8regs_3,
.do_4 = xor_8regs_4,
.do_5 = xor_8regs_5,
};
static struct xor_block_template xor_block_32regs = {
.name = "32regs",
.do_2 = xor_32regs_2,
.do_3 = xor_32regs_3,
.do_4 = xor_32regs_4,
.do_5 = xor_32regs_5,
};
static struct xor_block_template xor_block_8regs_p = {
.name = "8regs_prefetch",
.do_2 = xor_8regs_p_2,
.do_3 = xor_8regs_p_3,
.do_4 = xor_8regs_p_4,
.do_5 = xor_8regs_p_5,
};
static struct xor_block_template xor_block_32regs_p = {
.name = "32regs_prefetch",
.do_2 = xor_32regs_p_2,
.do_3 = xor_32regs_p_3,
.do_4 = xor_32regs_p_4,
.do_5 = xor_32regs_p_5,
};
#define XOR_TRY_TEMPLATES \
do { \
xor_speed(&xor_block_8regs); \
xor_speed(&xor_block_8regs_p); \
xor_speed(&xor_block_32regs); \
xor_speed(&xor_block_32regs_p); \
} while (0)
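XOR_TRY_TEMPLATES is expanded by the RAID core's calibration code, which benchmarks every candidate template and keeps the fastest for subsequent parity computation. A shape-only sketch of that idea, assuming the template struct carries a speed field as the in-tree RAID code's version does; measure_mbps() is an invented stand-in for the real jiffies-based timing loop:

	/* Hypothetical calibration sketch, not the in-tree implementation. */
	static struct xor_block_template *fastest;

	static void xor_speed(struct xor_block_template *tmpl)
	{
		tmpl->speed = measure_mbps(tmpl);	/* hypothetical helper */
		if (!fastest || tmpl->speed > fastest->speed)
			fastest = tmpl;
	}

	void calibrate_xor_block(void)
	{
		XOR_TRY_TEMPLATES;	/* runs xor_speed() on each template */
	}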