(2006-08-06) rescue-bootcd

This commit is contained in:
2006-08-06 00:00:00 +02:00
parent 2f796b816a
commit decb062d20
21091 changed files with 7076462 additions and 0 deletions

View File

@@ -0,0 +1,38 @@
/*
* include/asm-s390/a.out.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
*
* Derived from "include/asm-i386/a.out.h"
* Copyright (C) 1992, Linus Torvalds
*
* I don't think we'll ever need a.out ...
*/
#ifndef __S390_A_OUT_H__
#define __S390_A_OUT_H__
/*
 * Classic a.out executable image header.  Present only for source
 * compatibility with the i386 version; as the file header notes,
 * s390 is never expected to load a.out binaries.
 */
struct exec
{
unsigned long a_info; /* Use macros N_MAGIC, etc for access */
unsigned a_text; /* length of text, in bytes */
unsigned a_data; /* length of data, in bytes */
unsigned a_bss; /* length of uninitialized data area for file, in bytes */
unsigned a_syms; /* length of symbol table data in file, in bytes */
unsigned a_entry; /* start address */
unsigned a_trsize; /* length of relocation info for text, in bytes */
unsigned a_drsize; /* length of relocation info for data, in bytes */
};
/* Accessors for the relocation/symbol table sizes of an image. */
#define N_TRSIZE(a) ((a).a_trsize)
#define N_DRSIZE(a) ((a).a_drsize)
#define N_SYMSIZE(a) ((a).a_syms)
#ifdef __KERNEL__
/* User stack grows down from the top of the task address space. */
#define STACK_TOP TASK_SIZE
#endif
#endif /* __S390_A_OUT_H__ */

View File

@@ -0,0 +1,203 @@
#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__
/*
* include/asm-s390/atomic.h
*
* S390 version
* Copyright (C) 1999-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Denis Joseph Barrow,
* Arnd Bergmann (arndb@de.ibm.com)
*
* Derived from "include/asm-i386/bitops.h"
* Copyright (C) 1992, Linus Torvalds
*
*/
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
* S390 uses 'Compare And Swap' for atomicity in SMP environment
*/
/*
 * 32-bit atomic counter.  4-byte alignment is required because the
 * CS (Compare And Swap) instruction used below needs a word-aligned
 * second operand.
 */
typedef struct {
volatile int counter;
} __attribute__ ((aligned (4))) atomic_t;
/* Static initializer: static atomic_t x = ATOMIC_INIT(0); */
#define ATOMIC_INIT(i) { (i) }
#ifdef __KERNEL__
/*
 * __CS_LOOP - compare-and-swap retry loop.
 *
 * Loads the current counter, applies op_string (an RR-format
 * instruction mnemonic such as "ar"/"sr"/"nr"/"or") with op_val, and
 * commits with CS.  If another CPU changed the counter in between, CS
 * fails (condition code "low") and the "jl 0b" branch retries.
 * Evaluates to the new counter value.
 */
#define __CS_LOOP(ptr, op_val, op_string) ({ \
typeof(ptr->counter) old_val, new_val; \
__asm__ __volatile__(" l %0,0(%3)\n" \
"0: lr %1,%0\n" \
op_string " %1,%4\n" \
" cs %0,%1,0(%3)\n" \
" jl 0b" \
: "=&d" (old_val), "=&d" (new_val), \
"=m" (((atomic_t *)(ptr))->counter) \
: "a" (ptr), "d" (op_val), \
"m" (((atomic_t *)(ptr))->counter) \
: "cc", "memory" ); \
new_val; \
})
/* Plain accesses; no locking needed for an aligned word read/write. */
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
/* v += i, no return value. */
static __inline__ void atomic_add(int i, atomic_t * v)
{
__CS_LOOP(v, i, "ar");
}
/* v += i; returns the new value. */
static __inline__ int atomic_add_return(int i, atomic_t * v)
{
return __CS_LOOP(v, i, "ar");
}
/* v += i; returns true if the result is negative. */
static __inline__ int atomic_add_negative(int i, atomic_t * v)
{
return __CS_LOOP(v, i, "ar") < 0;
}
/* v -= i. */
static __inline__ void atomic_sub(int i, atomic_t * v)
{
__CS_LOOP(v, i, "sr");
}
/* v += 1. */
static __inline__ void atomic_inc(volatile atomic_t * v)
{
__CS_LOOP(v, 1, "ar");
}
/* v += 1; returns the new value. */
static __inline__ int atomic_inc_return(volatile atomic_t * v)
{
return __CS_LOOP(v, 1, "ar");
}
/* v += 1; returns true if the result is zero. */
static __inline__ int atomic_inc_and_test(volatile atomic_t * v)
{
return __CS_LOOP(v, 1, "ar") == 0;
}
/* v -= 1. */
static __inline__ void atomic_dec(volatile atomic_t * v)
{
__CS_LOOP(v, 1, "sr");
}
/* v -= 1; returns the new value. */
static __inline__ int atomic_dec_return(volatile atomic_t * v)
{
return __CS_LOOP(v, 1, "sr");
}
/* v -= 1; returns true if the result is zero. */
static __inline__ int atomic_dec_and_test(volatile atomic_t * v)
{
return __CS_LOOP(v, 1, "sr") == 0;
}
/* v &= ~mask ("nr" = AND). */
static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
{
__CS_LOOP(v, ~mask, "nr");
}
/* v |= mask ("or" = OR). */
static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
{
__CS_LOOP(v, mask, "or");
}
/* Implementation detail, not part of the interface. */
#undef __CS_LOOP
#ifdef __s390x__
/*
 * 64-bit atomic counter, 64-bit kernels only.  8-byte alignment is
 * required by the CSG (Compare and Swap, 64-bit) instruction below.
 */
typedef struct {
volatile long long counter;
} __attribute__ ((aligned (8))) atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
/*
 * __CSG_LOOP - 64-bit variant of __CS_LOOP using LG/LGR/CSG.
 *
 * op_string is a 64-bit instruction mnemonic ("agr"/"sgr"/"ngr"/"ogr").
 * Retries via CSG until the update commits; evaluates to the new value.
 *
 * Bug fix: the memory-operand casts were (atomic_t *), describing only a
 * 32-bit counter to the compiler even though the asm reads and writes all
 * 8 bytes; they must be (atomic64_t *) so the full 64-bit counter is
 * covered by the "m"/"=m" operands.
 */
#define __CSG_LOOP(ptr, op_val, op_string) ({ \
typeof(ptr->counter) old_val, new_val; \
__asm__ __volatile__(" lg %0,0(%3)\n" \
"0: lgr %1,%0\n" \
op_string " %1,%4\n" \
" csg %0,%1,0(%3)\n" \
" jl 0b" \
: "=&d" (old_val), "=&d" (new_val), \
"=m" (((atomic64_t *)(ptr))->counter) \
: "a" (ptr), "d" (op_val), \
"m" (((atomic64_t *)(ptr))->counter) \
: "cc", "memory" ); \
new_val; \
})
/* Plain accesses; aligned doubleword read/write needs no locking. */
#define atomic64_read(v) ((v)->counter)
#define atomic64_set(v,i) (((v)->counter) = (i))
/* v += i. */
static __inline__ void atomic64_add(int i, atomic64_t * v)
{
__CSG_LOOP(v, i, "agr");
}
/* v += i; returns the new value. */
static __inline__ long long atomic64_add_return(int i, atomic64_t * v)
{
return __CSG_LOOP(v, i, "agr");
}
/* v += i; returns true if the result is negative. */
static __inline__ long long atomic64_add_negative(int i, atomic64_t * v)
{
return __CSG_LOOP(v, i, "agr") < 0;
}
/* v -= i. */
static __inline__ void atomic64_sub(int i, atomic64_t * v)
{
__CSG_LOOP(v, i, "sgr");
}
/* v += 1. */
static __inline__ void atomic64_inc(volatile atomic64_t * v)
{
__CSG_LOOP(v, 1, "agr");
}
/* v += 1; returns the new value. */
static __inline__ long long atomic64_inc_return(volatile atomic64_t * v)
{
return __CSG_LOOP(v, 1, "agr");
}
/* v += 1; returns true if the result is zero. */
static __inline__ long long atomic64_inc_and_test(volatile atomic64_t * v)
{
return __CSG_LOOP(v, 1, "agr") == 0;
}
/* v -= 1. */
static __inline__ void atomic64_dec(volatile atomic64_t * v)
{
__CSG_LOOP(v, 1, "sgr");
}
/* v -= 1; returns the new value. */
static __inline__ long long atomic64_dec_return(volatile atomic64_t * v)
{
return __CSG_LOOP(v, 1, "sgr");
}
/* v -= 1; returns true if the result is zero. */
static __inline__ long long atomic64_dec_and_test(volatile atomic64_t * v)
{
return __CSG_LOOP(v, 1, "sgr") == 0;
}
/* v &= ~mask ("ngr" = 64-bit AND). */
static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
{
__CSG_LOOP(v, ~mask, "ngr");
}
/* v |= mask ("ogr" = 64-bit OR). */
static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
{
__CSG_LOOP(v, mask, "ogr");
}
/* Implementation detail, not part of the interface. */
#undef __CSG_LOOP
#endif
/*
 * atomic_compare_and_swap - conditionally replace the counter.
 *
 * Returns 0 if expected_oldval == *v and the swap was performed,
 * 1 if the compare failed (counter left unchanged).
 * This is non-portable, use bitops or spinlocks instead!
 */
static __inline__ int
atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
{
int retval;
__asm__ __volatile__(
" lr %0,%3\n" /* retval = expected_oldval */
" cs %0,%4,0(%2)\n" /* if (*v == retval) *v = new_val */
" ipm %0\n" /* extract the CS condition code ... */
" srl %0,28\n" /* ... into retval (0 or 1) */
"0:"
: "=&d" (retval), "=m" (v->counter)
: "a" (v), "d" (expected_oldval) , "d" (new_val),
"m" (v->counter) : "cc", "memory" );
return retval;
}
/* Memory-ordering hooks used by generic code around atomic inc/dec. */
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
#endif /* __KERNEL__ */
#endif /* __ARCH_S390_ATOMIC__ */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,14 @@
#ifndef _S390_BUG_H
#define _S390_BUG_H
#include <linux/kernel.h>
/*
 * BUG() - report an unrecoverable kernel bug and stop here.
 * After the diagnostic printk, ".long 0" emits a word of zeroes,
 * which is not a valid instruction and therefore traps.
 */
#define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
__asm__ __volatile__(".long 0"); \
} while (0)
#define HAVE_ARCH_BUG
#include <asm-generic/bug.h>
#endif

View File

@@ -0,0 +1,22 @@
/*
* include/asm-s390/bugs.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/bugs.h"
* Copyright (C) 1994 Linus Torvalds
*/
/*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Needs:
* void check_bugs(void);
*/
/* Called once from init/main.c; no architecture bugs to work around. */
static void __init check_bugs(void)
{
/* s390 has no bugs ... */
}

View File

@@ -0,0 +1,131 @@
#ifndef _S390_BYTEORDER_H
#define _S390_BYTEORDER_H
/*
* include/asm-s390/byteorder.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#include <asm/types.h>
#ifdef __GNUC__
#ifdef __s390x__
/* Byte-swap a 64-bit value loaded from memory (LRVG = load reversed). */
static __inline__ __u64 ___arch__swab64p(const __u64 *x)
{
__u64 result;
__asm__ __volatile__ (
" lrvg %0,%1"
: "=d" (result) : "m" (*x) );
return result;
}
/* Byte-swap a 64-bit value held in a register (LRVGR). */
static __inline__ __u64 ___arch__swab64(__u64 x)
{
__u64 result;
__asm__ __volatile__ (
" lrvgr %0,%1"
: "=d" (result) : "d" (x) );
return result;
}
/* Byte-swap a 64-bit value in place. */
static __inline__ void ___arch__swab64s(__u64 *x)
{
*x = ___arch__swab64p(x);
}
#endif /* __s390x__ */
/*
 * Byte-swap a 32-bit value loaded from memory.  31-bit kernels have no
 * load-reversed instruction, so the bytes are gathered in reverse order
 * with ICM/IC; 64-bit kernels use a single LRV.
 */
static __inline__ __u32 ___arch__swab32p(const __u32 *x)
{
__u32 result;
__asm__ __volatile__ (
#ifndef __s390x__
" icm %0,8,3(%1)\n"
" icm %0,4,2(%1)\n"
" icm %0,2,1(%1)\n"
" ic %0,0(%1)"
: "=&d" (result) : "a" (x), "m" (*x) : "cc" );
#else /* __s390x__ */
" lrv %0,%1"
: "=d" (result) : "m" (*x) );
#endif /* __s390x__ */
return result;
}
/* Byte-swap a 32-bit value; register form (LRVR) on 64-bit kernels. */
static __inline__ __u32 ___arch__swab32(__u32 x)
{
#ifndef __s390x__
return ___arch__swab32p(&x);
#else /* __s390x__ */
__u32 result;
__asm__ __volatile__ (
" lrvr %0,%1"
: "=d" (result) : "d" (x) );
return result;
#endif /* __s390x__ */
}
/* Byte-swap a 32-bit value in place. */
static __inline__ void ___arch__swab32s(__u32 *x)
{
*x = ___arch__swab32p(x);
}
/* Byte-swap a 16-bit value loaded from memory (ICM/IC or LRVH). */
static __inline__ __u16 ___arch__swab16p(const __u16 *x)
{
__u16 result;
__asm__ __volatile__ (
#ifndef __s390x__
" icm %0,2,1(%1)\n"
" ic %0,0(%1)\n"
: "=&d" (result) : "a" (x), "m" (*x) : "cc" );
#else /* __s390x__ */
" lrvh %0,%1"
: "=d" (result) : "m" (*x) );
#endif /* __s390x__ */
return result;
}
/* Byte-swap a 16-bit value. */
static __inline__ __u16 ___arch__swab16(__u16 x)
{
return ___arch__swab16p(&x);
}
/* Byte-swap a 16-bit value in place. */
static __inline__ void ___arch__swab16s(__u16 *x)
{
*x = ___arch__swab16p(x);
}
/*
 * Hook the arch-optimized swab routines into the generic byteorder
 * machinery; linux/byteorder synthesizes everything else from these.
 */
#ifdef __s390x__
#define __arch__swab64(x) ___arch__swab64(x)
#define __arch__swab64p(x) ___arch__swab64p(x)
#define __arch__swab64s(x) ___arch__swab64s(x)
#endif /* __s390x__ */
#define __arch__swab32(x) ___arch__swab32(x)
#define __arch__swab16(x) ___arch__swab16(x)
#define __arch__swab32p(x) ___arch__swab32p(x)
#define __arch__swab16p(x) ___arch__swab16p(x)
#define __arch__swab32s(x) ___arch__swab32s(x)
#define __arch__swab16s(x) ___arch__swab16s(x)
#ifndef __s390x__
#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __BYTEORDER_HAS_U64__
/* 31-bit: build 64-bit swabs from two 32-bit swabs in generic code. */
# define __SWAB_64_THRU_32__
#endif
#else /* __s390x__ */
#define __BYTEORDER_HAS_U64__
#endif /* __s390x__ */
#endif /* __GNUC__ */
#include <linux/byteorder/big_endian.h>
#endif /* _S390_BYTEORDER_H */

View File

@@ -0,0 +1,20 @@
/*
* include/asm-s390/cache.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
*
* Derived from "include/asm-i386/cache.h"
* Copyright (C) 1992, Linus Torvalds
*/
#ifndef __ARCH_S390_CACHE_H
#define __ARCH_S390_CACHE_H
/* s390 cache lines are 256 bytes (2^8). */
#define L1_CACHE_BYTES 256
#define L1_CACHE_SHIFT 8
#define L1_CACHE_SHIFT_MAX 8 /* largest L1 which this arch supports */
/* minimum alignment guaranteed by kmalloc() on this arch */
#define ARCH_KMALLOC_MINALIGN 8
#endif

View File

@@ -0,0 +1,26 @@
#ifndef _S390_CACHEFLUSH_H
#define _S390_CACHEFLUSH_H
/* Keep includes the same across arches. */
#include <linux/mm.h>
/* Caches aren't brain-dead on the s390: no explicit flushing is
   required for these operations, so every primitive is a no-op. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
/* With no cache maintenance needed, user-page copies are plain memcpy. */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
#endif /* _S390_CACHEFLUSH_H */

View File

@@ -0,0 +1,176 @@
/*
* include/asm-s390/ccwdev.h
* include/asm-s390x/ccwdev.h
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Arnd Bergmann <arndb@de.ibm.com>
*
* Interface for CCW device drivers
*/
#ifndef _S390_CCWDEV_H_
#define _S390_CCWDEV_H_
#include <linux/device.h>
#include <linux/mod_devicetable.h>
/* structs from asm/cio.h */
struct irb;
struct ccw1;
/* simplified initializers for struct ccw_device:
* CCW_DEVICE and CCW_DEVICE_DEVTYPE initialize one
* entry in your MODULE_DEVICE_TABLE and set the match_flag correctly */
#define CCW_DEVICE(cu, cum) \
.cu_type=(cu), .cu_model=(cum), \
.match_flags=(CCW_DEVICE_ID_MATCH_CU_TYPE \
| (cum ? CCW_DEVICE_ID_MATCH_CU_MODEL : 0))
#define CCW_DEVICE_DEVTYPE(cu, cum, dev, devm) \
.cu_type=(cu), .cu_model=(cum), .dev_type=(dev), .dev_model=(devm),\
.match_flags=CCW_DEVICE_ID_MATCH_CU_TYPE \
| ((cum) ? CCW_DEVICE_ID_MATCH_CU_MODEL : 0) \
| CCW_DEVICE_ID_MATCH_DEVICE_TYPE \
| ((devm) ? CCW_DEVICE_ID_MATCH_DEVICE_MODEL : 0)
/* scan through an array of device ids and return the first
* entry that matches the device.
*
* the array must end with an entry containing zero match_flags
*/
/*
 * ccw_device_id_match - scan an array of device ids and return the
 * first entry that matches the device.
 *
 * @array: table to scan; must end with an entry whose match_flags is 0.
 * @match: the id to look for.
 *
 * Returns the first matching entry, or NULL if nothing matched.
 * (Fix: returned the integer literal 0 for a pointer; use NULL.
 * Also dropped the redundant initializer on @id, which was immediately
 * overwritten by the for-loop.)
 */
static inline const struct ccw_device_id *
ccw_device_id_match(const struct ccw_device_id *array,
const struct ccw_device_id *match)
{
const struct ccw_device_id *id;
for (id = array; id->match_flags; id++) {
/* For each criterion: skip this entry if it requests a match
   on the field and the field differs. */
if ((id->match_flags & CCW_DEVICE_ID_MATCH_CU_TYPE)
&& (id->cu_type != match->cu_type))
continue;
if ((id->match_flags & CCW_DEVICE_ID_MATCH_CU_MODEL)
&& (id->cu_model != match->cu_model))
continue;
if ((id->match_flags & CCW_DEVICE_ID_MATCH_DEVICE_TYPE)
&& (id->dev_type != match->dev_type))
continue;
if ((id->match_flags & CCW_DEVICE_ID_MATCH_DEVICE_MODEL)
&& (id->dev_model != match->dev_model))
continue;
return id;
}
return NULL;
}
/* The struct ccw device is our replacement for the globally accessible
 * ioinfo array. ioinfo will mutate into a subchannel device later.
 *
 * Reference: Documentation/s390/driver-model.txt */
struct ccw_device {
spinlock_t *ccwlock; /* lock protecting this device */
struct ccw_device_private *private; /* cio private information */
struct ccw_device_id id; /* id of this device, driver_info is
set by ccw_find_driver */
struct ccw_driver *drv; /* driver bound to this device (presumably
set at probe time — TODO confirm) */
struct device dev; /* embedded generic device */
int online; /* nonzero once set online */
/* This is sick, but a driver can have different interrupt handlers
for different ccw_devices (multi-subchannel drivers)... */
void (*handler) (struct ccw_device *, unsigned long, struct irb *);
};
/* Each ccw driver registers with the ccw root bus */
struct ccw_driver {
struct module *owner; /* for automatic MOD_INC_USE_COUNT */
struct ccw_device_id *ids; /* probe driver with these devs */
int (*probe) (struct ccw_device *); /* ask driver to probe dev */
void (*remove) (struct ccw_device *);
/* device is no longer available */
int (*set_online) (struct ccw_device *); /* bring device online */
int (*set_offline) (struct ccw_device *); /* take device offline */
int (*notify) (struct ccw_device *, int); /* event notification */
struct device_driver driver; /* higher level structure, don't init
this from your driver */
char *name; /* driver name */
};
extern struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
const char *bus_id);
/* devices drivers call these during module load and unload.
* When a driver is registered, its probe method is called
* when new devices for its type pop up */
extern int ccw_driver_register (struct ccw_driver *driver);
extern void ccw_driver_unregister (struct ccw_driver *driver);
struct ccw1;
extern int ccw_device_set_options(struct ccw_device *, unsigned long);
/* Allow for i/o completion notification after primary interrupt status. */
#define CCWDEV_EARLY_NOTIFICATION 0x0001
/* Report all interrupt conditions. */
#define CCWDEV_REPORT_ALL 0x0002
/* Try to perform path grouping. */
#define CCWDEV_DO_PATHGROUP 0x0004
/* Allow forced onlining of boxed devices. */
#define CCWDEV_ALLOW_FORCE 0x0008
/*
* ccw_device_start()
*
* Start a S/390 channel program. When the interrupt arrives, the
* IRQ handler is called, either immediately, delayed (dev-end missing,
* or sense required) or never (no IRQ handler registered).
* Depending on the action taken, ccw_device_start() returns:
* 0 - Success
* -EBUSY - Device busy, or status pending
* -ENODEV - Device not operational
* -EINVAL - Device invalid for operation
*/
extern int ccw_device_start(struct ccw_device *, struct ccw1 *,
unsigned long, __u8, unsigned long);
/*
* ccw_device_start_timeout()
*
* This function notifies the device driver if the channel program has not
* completed during the specified time. If a timeout occurs, the channel
* program is terminated via xsch(), hsch() or csch().
*/
extern int ccw_device_start_timeout(struct ccw_device *, struct ccw1 *,
unsigned long, __u8, unsigned long, int);
extern int ccw_device_resume(struct ccw_device *);
extern int ccw_device_halt(struct ccw_device *, unsigned long);
extern int ccw_device_clear(struct ccw_device *, unsigned long);
extern int read_dev_chars(struct ccw_device *cdev, void **buffer, int length);
extern int read_conf_data(struct ccw_device *cdev, void **buffer, int *length);
extern int ccw_device_set_online(struct ccw_device *cdev);
extern int ccw_device_set_offline(struct ccw_device *cdev);
extern struct ciw *ccw_device_get_ciw(struct ccw_device *, __u32 cmd);
extern __u8 ccw_device_get_path_mask(struct ccw_device *);
#define get_ccwdev_lock(x) (x)->ccwlock
#define to_ccwdev(n) container_of(n, struct ccw_device, dev)
#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
extern struct ccw_device *ccw_device_probe_console(void);
// FIXME: these have to go
extern int _ccw_device_get_device_number(struct ccw_device *);
extern int _ccw_device_get_subchannel_number(struct ccw_device *);
extern struct device *s390_root_dev_register(const char *);
extern void s390_root_dev_unregister(struct device *);
#endif /* _S390_CCWDEV_H_ */

View File

@@ -0,0 +1,45 @@
#ifndef S390_CCWGROUP_H
#define S390_CCWGROUP_H
struct ccw_device;
struct ccw_driver;
/* A group of ccw devices presented to drivers as one master device. */
struct ccwgroup_device {
unsigned long creator_id; /* unique number of the driver */
enum {
CCWGROUP_OFFLINE,
CCWGROUP_ONLINE,
} state; /* current group state */
atomic_t onoff; /* guards concurrent online/offline transitions */
unsigned int count; /* number of attached slave devices */
struct device dev; /* master device */
struct ccw_device *cdev[0]; /* variable number, allocate as needed */
};
/* Driver for a ccw device group; registered with the ccwgroup bus. */
struct ccwgroup_driver {
struct module *owner; /* owning module */
char *name; /* driver name */
int max_slaves; /* maximum slave devices per group */
unsigned long driver_id; /* matches ccwgroup_device.creator_id */
int (*probe) (struct ccwgroup_device *);
void (*remove) (struct ccwgroup_device *);
int (*set_online) (struct ccwgroup_device *);
int (*set_offline) (struct ccwgroup_device *);
struct device_driver driver; /* this driver */
};
extern int ccwgroup_driver_register (struct ccwgroup_driver *cdriver);
extern void ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver);
extern int ccwgroup_create (struct device *root,
unsigned int creator_id,
struct ccw_driver *gdrv,
int argc, char *argv[]);
extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
#define to_ccwgroupdev(x) container_of((x), struct ccwgroup_device, dev)
#define to_ccwgroupdrv(x) container_of((x), struct ccwgroup_driver, driver)
#endif

View File

@@ -0,0 +1,264 @@
#ifndef _S390_CHECKSUM_H
#define _S390_CHECKSUM_H
/*
* include/asm-s390/checksum.h
* S390 fast network checksum routines
* see also arch/S390/lib/checksum.c
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Ulrich Hild (first version)
* Martin Schwidefsky (heavily optimized CKSM version)
* D.J. Barrow (third attempt)
*/
#include <asm/uaccess.h>
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
/* Fold [buff, buff+len) into sum using the CKSM instruction; CKSM may
 * complete partially, in which case "jo" loops until it is done. */
static inline unsigned int
csum_partial(const unsigned char * buff, int len, unsigned int sum)
{
/*
 * Experiments with ethernet and slip connections show that buf
 * is aligned on either a 2-byte or 4-byte boundary.
 */
#ifndef __s390x__
register_pair rp;
rp.subreg.even = (unsigned long) buff; /* address in even register */
rp.subreg.odd = (unsigned long) len; /* length in odd register */
__asm__ __volatile__ (
"0: cksm %0,%1\n" /* do checksum on longs */
" jo 0b\n"
: "+&d" (sum), "+&a" (rp) : : "cc", "memory" );
#else /* __s390x__ */
__asm__ __volatile__ (
" lgr 2,%1\n" /* address in gpr 2 */
" lgfr 3,%2\n" /* length in gpr 3 */
"0: cksm %0,2\n" /* do checksum on longs */
" jo 0b\n"
: "+&d" (sum)
: "d" (buff), "d" (len)
: "cc", "memory", "2", "3" );
#endif /* __s390x__ */
return sum;
}
/*
 * csum_partial as an inline function
 * NOTE(review): body is identical to csum_partial above; kept separate
 * so out-of-line callers can take csum_partial's address.
 */
static inline unsigned int
csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
{
#ifndef __s390x__
register_pair rp;
rp.subreg.even = (unsigned long) buff; /* address in even register */
rp.subreg.odd = (unsigned long) len; /* length in odd register */
__asm__ __volatile__ (
"0: cksm %0,%1\n" /* do checksum on longs */
" jo 0b\n"
: "+&d" (sum), "+&a" (rp) : : "cc", "memory" );
#else /* __s390x__ */
__asm__ __volatile__ (
" lgr 2,%1\n" /* address in gpr 2 */
" lgfr 3,%2\n" /* length in gpr 3 */
"0: cksm %0,2\n" /* do checksum on longs */
" jo 0b\n"
: "+&d" (sum)
: "d" (buff), "d" (len)
: "cc", "memory", "2", "3" );
#endif /* __s390x__ */
return sum;
}
/*
 * the same as csum_partial_copy, but copies from user space.
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 *
 * Copy from userspace and compute checksum. If we catch an exception
 * then zero the rest of the buffer.
 */
static inline unsigned int
csum_partial_copy_from_user(const char __user *src, char *dst,
int len, unsigned int sum,
int *err_ptr)
{
int missing;
/* copy_from_user returns the number of bytes NOT copied */
missing = copy_from_user(dst, src, len);
if (missing) {
memset(dst + len - missing, 0, missing); /* zero the tail */
*err_ptr = -EFAULT;
}
return csum_partial(dst, len, sum);
}
/* Copy src to dst (kernel space, no fault checking) and checksum it. */
static inline unsigned int
csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum)
{
memcpy(dst,src,len);
return csum_partial_inline(dst, len, sum);
}
/*
 * Fold a partial checksum without adding pseudo headers:
 * add the two 16-bit halves (with end-around carry) and complement.
 */
static inline unsigned short
csum_fold(unsigned int sum)
{
#ifndef __s390x__
register_pair rp;
__asm__ __volatile__ (
" slr %N1,%N1\n" /* %0 = H L */
" lr %1,%0\n" /* %0 = H L, %1 = H L 0 0 */
" srdl %1,16\n" /* %0 = H L, %1 = 0 H L 0 */
" alr %1,%N1\n" /* %0 = H L, %1 = L H L 0 */
" alr %0,%1\n" /* %0 = H+L+C L+H */
" srl %0,16\n" /* %0 = H+L+C */
: "+&d" (sum), "=d" (rp) : : "cc" );
#else /* __s390x__ */
__asm__ __volatile__ (
" sr 3,3\n" /* %0 = H*65536 + L */
" lr 2,%0\n" /* %0 = H L, R2/R3 = H L / 0 0 */
" srdl 2,16\n" /* %0 = H L, R2/R3 = 0 H / L 0 */
" alr 2,3\n" /* %0 = H L, R2/R3 = L H / L 0 */
" alr %0,2\n" /* %0 = H+L+C L+H */
" srl %0,16\n" /* %0 = H+L+C */
: "+&d" (sum) : : "cc", "2", "3");
#endif /* __s390x__ */
return ((unsigned short) ~sum);
}
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 * ihl is the header length in 32-bit words.
 */
static inline unsigned short
ip_fast_csum(unsigned char *iph, unsigned int ihl)
{
unsigned long sum;
#ifndef __s390x__
register_pair rp;
rp.subreg.even = (unsigned long) iph;
rp.subreg.odd = (unsigned long) ihl*4; /* length in bytes */
__asm__ __volatile__ (
" sr %0,%0\n" /* set sum to zero */
"0: cksm %0,%1\n" /* do checksum on longs */
" jo 0b\n"
: "=&d" (sum), "+&a" (rp) : : "cc", "memory" );
#else /* __s390x__ */
__asm__ __volatile__ (
" slgr %0,%0\n" /* set sum to zero */
" lgr 2,%1\n" /* address in gpr 2 */
" lgfr 3,%2\n" /* length in gpr 3 */
"0: cksm %0,2\n" /* do checksum on ints */
" jo 0b\n"
: "=&d" (sum)
: "d" (iph), "d" (ihl*4)
: "cc", "memory", "2", "3" );
#endif /* __s390x__ */
return csum_fold(sum);
}
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * (saddr + daddr + (len<<16) + proto) with end-around carry;
 * returns a 32-bit checksum
 */
static inline unsigned int
csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
unsigned short len, unsigned short proto,
unsigned int sum)
{
#ifndef __s390x__
__asm__ __volatile__ (
" alr %0,%1\n" /* sum += saddr */
" brc 12,0f\n" /* branch if no carry */
" ahi %0,1\n" /* add carry */
"0:"
: "+&d" (sum) : "d" (saddr) : "cc" );
__asm__ __volatile__ (
" alr %0,%1\n" /* sum += daddr */
" brc 12,1f\n"
" ahi %0,1\n" /* add carry */
"1:"
: "+&d" (sum) : "d" (daddr) : "cc" );
__asm__ __volatile__ (
" alr %0,%1\n" /* sum += (len<<16) + (proto<<8) */
" brc 12,2f\n"
" ahi %0,1\n" /* add carry */
"2:"
: "+&d" (sum)
: "d" (((unsigned int) len<<16) + (unsigned int) proto)
: "cc" );
#else /* __s390x__ */
__asm__ __volatile__ (
" lgfr %0,%0\n"
" algr %0,%1\n" /* sum += saddr */
" brc 12,0f\n"
" aghi %0,1\n" /* add carry */
"0: algr %0,%2\n" /* sum += daddr */
" brc 12,1f\n"
" aghi %0,1\n" /* add carry */
"1: algfr %0,%3\n" /* sum += (len<<16) + proto */
" brc 12,2f\n"
" aghi %0,1\n" /* add carry */
"2: srlg 0,%0,32\n"
" alr %0,0\n" /* fold to 32 bits */
" brc 12,3f\n"
" ahi %0,1\n" /* add carry */
"3: llgfr %0,%0"
: "+&d" (sum)
: "d" (saddr), "d" (daddr),
"d" (((unsigned int) len<<16) + (unsigned int) proto)
: "cc", "0" );
#endif /* __s390x__ */
return sum;
}
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline unsigned short int
csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
unsigned short len, unsigned short proto,
unsigned int sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline unsigned short
ip_compute_csum(unsigned char * buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#endif /* _S390_CHECKSUM_H */

View File

@@ -0,0 +1,279 @@
/*
* include/asm-s390/cio.h
* include/asm-s390x/cio.h
*
* Common interface for I/O on S/390
*/
#ifndef _ASM_S390_CIO_H_
#define _ASM_S390_CIO_H_
#include <linux/spinlock.h>
#include <asm/types.h>
#ifdef __KERNEL__
#define LPM_ANYPATH 0xff
/*
 * subchannel status word (bit-exact hardware layout, hence packed)
 */
struct scsw {
__u32 key : 4; /* subchannel key */
__u32 sctl : 1; /* suspend control */
__u32 eswf : 1; /* ESW format */
__u32 cc : 2; /* deferred condition code */
__u32 fmt : 1; /* format */
__u32 pfch : 1; /* prefetch */
__u32 isic : 1; /* initial-status interruption control */
__u32 alcc : 1; /* address-limit checking control */
__u32 ssi : 1; /* suppress-suspended interruption */
__u32 zcc : 1; /* zero condition code */
__u32 ectl : 1; /* extended control */
__u32 pno : 1; /* path not operational */
__u32 res : 1; /* reserved */
__u32 fctl : 3; /* function control */
__u32 actl : 7; /* activity control */
__u32 stctl : 5; /* status control */
__u32 cpa; /* channel program address */
__u32 dstat : 8; /* device status */
__u32 cstat : 8; /* subchannel status */
__u32 count : 16; /* residual count */
} __attribute__ ((packed));
#define SCSW_FCTL_CLEAR_FUNC 0x1
#define SCSW_FCTL_HALT_FUNC 0x2
#define SCSW_FCTL_START_FUNC 0x4
#define SCSW_ACTL_SUSPENDED 0x1
#define SCSW_ACTL_DEVACT 0x2
#define SCSW_ACTL_SCHACT 0x4
#define SCSW_ACTL_CLEAR_PEND 0x8
#define SCSW_ACTL_HALT_PEND 0x10
#define SCSW_ACTL_START_PEND 0x20
#define SCSW_ACTL_RESUME_PEND 0x40
#define SCSW_STCTL_STATUS_PEND 0x1
#define SCSW_STCTL_SEC_STATUS 0x2
#define SCSW_STCTL_PRIM_STATUS 0x4
#define SCSW_STCTL_INTER_STATUS 0x8
#define SCSW_STCTL_ALERT_STATUS 0x10
#define DEV_STAT_ATTENTION 0x80
#define DEV_STAT_STAT_MOD 0x40
#define DEV_STAT_CU_END 0x20
#define DEV_STAT_BUSY 0x10
#define DEV_STAT_CHN_END 0x08
#define DEV_STAT_DEV_END 0x04
#define DEV_STAT_UNIT_CHECK 0x02
#define DEV_STAT_UNIT_EXCEP 0x01
#define SCHN_STAT_PCI 0x80
#define SCHN_STAT_INCORR_LEN 0x40
#define SCHN_STAT_PROG_CHECK 0x20
#define SCHN_STAT_PROT_CHECK 0x10
#define SCHN_STAT_CHN_DATA_CHK 0x08
#define SCHN_STAT_CHN_CTRL_CHK 0x04
#define SCHN_STAT_INTF_CTRL_CHK 0x02
#define SCHN_STAT_CHAIN_CHECK 0x01
/*
 * architectured values for first sense byte
 */
#define SNS0_CMD_REJECT 0x80
/* Fix: this alias expanded to the undefined symbol SNS0_CMD_REJEC
 * (missing final T), which broke any use of SNS_CMD_REJECT. */
#define SNS_CMD_REJECT SNS0_CMD_REJECT
#define SNS0_INTERVENTION_REQ 0x40
#define SNS0_BUS_OUT_CHECK 0x20
#define SNS0_EQUIPMENT_CHECK 0x10
#define SNS0_DATA_CHECK 0x08
#define SNS0_OVERRUN 0x04
#define SNS0_INCOMPL_DOMAIN 0x01
/*
* architectured values for second sense byte
*/
#define SNS1_PERM_ERR 0x80
#define SNS1_INV_TRACK_FORMAT 0x40
#define SNS1_EOC 0x20
#define SNS1_MESSAGE_TO_OPER 0x10
#define SNS1_NO_REC_FOUND 0x08
#define SNS1_FILE_PROTECTED 0x04
#define SNS1_WRITE_INHIBITED 0x02
#define SNS1_INPRECISE_END 0x01
/*
* architectured values for third sense byte
*/
#define SNS2_REQ_INH_WRITE 0x80
#define SNS2_CORRECTABLE 0x40
#define SNS2_FIRST_LOG_ERR 0x20
#define SNS2_ENV_DATA_PRESENT 0x10
#define SNS2_INPRECISE_END 0x04
/* One channel command word; 8-byte aligned as required by the channel. */
struct ccw1 {
__u8 cmd_code; /* command code */
__u8 flags; /* flags, like IDA addressing, etc. */
__u16 count; /* byte count */
__u32 cda; /* data address */
} __attribute__ ((packed,aligned(8)));
#define CCW_FLAG_DC 0x80
#define CCW_FLAG_CC 0x40
#define CCW_FLAG_SLI 0x20
#define CCW_FLAG_SKIP 0x10
#define CCW_FLAG_PCI 0x08
#define CCW_FLAG_IDA 0x04
#define CCW_FLAG_SUSPEND 0x02
#define CCW_CMD_READ_IPL 0x02
#define CCW_CMD_NOOP 0x03
#define CCW_CMD_BASIC_SENSE 0x04
#define CCW_CMD_TIC 0x08
#define CCW_CMD_STLCK 0x14
#define CCW_CMD_SENSE_PGID 0x34
#define CCW_CMD_SUSPEND_RECONN 0x5B
#define CCW_CMD_RDC 0x64
#define CCW_CMD_RELEASE 0x94
#define CCW_CMD_SET_PGID 0xAF
#define CCW_CMD_SENSE_ID 0xE4
#define CCW_CMD_DCTL 0xF3
#define SENSE_MAX_COUNT 0x20
/* extended report word (hardware layout, packed) */
struct erw {
__u32 res0 : 3; /* reserved */
__u32 auth : 1; /* Authorization check */
__u32 pvrf : 1; /* path-verification-required flag */
__u32 cpt : 1; /* channel-path timeout */
__u32 fsavf : 1; /* Failing storage address validity flag */
__u32 cons : 1; /* concurrent-sense */
__u32 scavf : 1; /* Secondary ccw address validity flag */
__u32 fsaf : 1; /* Failing storage address format */
__u32 scnt : 6; /* sense count if cons == 1 */
__u32 res16 : 16; /* reserved */
} __attribute__ ((packed));
/*
 * subchannel logout area
 */
struct sublog {
__u32 res0 : 1; /* reserved */
__u32 esf : 7; /* extended status flags */
__u32 lpum : 8; /* last path used mask */
__u32 arep : 1; /* ancillary report */
__u32 fvf : 5; /* field-validity flags */
__u32 sacc : 2; /* storage access code */
__u32 termc : 2; /* termination code */
__u32 devsc : 1; /* device-status check */
__u32 serr : 1; /* secondary error */
__u32 ioerr : 1; /* i/o-error alert */
__u32 seqc : 3; /* sequence code */
} __attribute__ ((packed));
/*
 * Format 0 Extended Status Word (ESW)
 */
struct esw0 {
struct sublog sublog; /* subchannel logout */
struct erw erw; /* extended report word */
__u32 faddr[2]; /* failing storage address */
__u32 saddr; /* secondary ccw address */
} __attribute__ ((packed));
/*
 * Format 1 Extended Status Word (ESW)
 */
struct esw1 {
__u8 zero0; /* reserved zeros */
__u8 lpum; /* last path used mask */
__u16 zero16; /* reserved zeros */
struct erw erw; /* extended report word */
__u32 zeros[3]; /* 2 fullwords of zeros */
} __attribute__ ((packed));
/*
 * Format 2 Extended Status Word (ESW)
 */
struct esw2 {
__u8 zero0; /* reserved zeros */
__u8 lpum; /* last path used mask */
__u16 dcti; /* device-connect-time interval */
struct erw erw; /* extended report word */
__u32 zeros[3]; /* 2 fullwords of zeros */
} __attribute__ ((packed));
/*
 * Format 3 Extended Status Word (ESW)
 */
struct esw3 {
__u8 zero0; /* reserved zeros */
__u8 lpum; /* last path used mask */
__u16 res; /* reserved */
struct erw erw; /* extended report word */
__u32 zeros[3]; /* 2 fullwords of zeros */
} __attribute__ ((packed));
/*
 * interruption response block; scsw.eswf/fmt select which ESW format
 * in the union is valid
 */
struct irb {
struct scsw scsw; /* subchannel status word */
union { /* extended status word, 4 formats */
struct esw0 esw0;
struct esw1 esw1;
struct esw2 esw2;
struct esw3 esw3;
} esw;
__u8 ecw[32]; /* extended control word */
} __attribute__ ((packed,aligned(4)));
/*
* command information word (CIW) layout
*/
/* command information word (hardware layout, packed) */
struct ciw {
__u32 et : 2; /* entry type */
__u32 reserved : 2; /* reserved */
__u32 ct : 4; /* command type */
__u32 cmd : 8; /* command */
__u32 count : 16; /* count */
} __attribute__ ((packed));
#define CIW_TYPE_RCD 0x0 /* read configuration data */
#define CIW_TYPE_SII 0x1 /* set interface identifier */
#define CIW_TYPE_RNI 0x2 /* read node identifier */
/*
* Flags used as input parameters for do_IO()
*/
#define DOIO_ALLOW_SUSPEND 0x0001 /* allow for channel prog. suspend */
#define DOIO_DENY_PREFETCH 0x0002 /* don't allow for CCW prefetch */
#define DOIO_SUPPRESS_INTER 0x0004 /* suppress intermediate inter. */
/* ... for suspended CCWs */
/* Device or subchannel gone. */
#define CIO_GONE 0x0001
/* No path to device. */
#define CIO_NO_PATH 0x0002
/* Device has appeared. */
#define CIO_OPER 0x0004
/* Sick revalidation of device. */
#define CIO_REVALIDATE 0x0008
/*
 * Parameter/result block for the diag210() device-information query
 * (presumably z/VM DIAGNOSE X'210' -- verify against the caller).
 * The first two fields are filled in by the caller, the rest are
 * returned by the hypervisor.
 */
struct diag210 {
	__u16 vrdcdvno : 16; /* device number (input) */
	__u16 vrdclen : 16; /* data block length (input) */
	__u32 vrdcvcla : 8; /* virtual device class (output) */
	__u32 vrdcvtyp : 8; /* virtual device type (output) */
	__u32 vrdcvsta : 8; /* virtual device status (output) */
	__u32 vrdcvfla : 8; /* virtual device flags (output) */
	__u32 vrdcrccl : 8; /* real device class (output) */
	__u32 vrdccrty : 8; /* real device type (output) */
	__u32 vrdccrmd : 8; /* real device model (output) */
	__u32 vrdccrft : 8; /* real device feature (output) */
} __attribute__ ((packed,aligned(4)));
extern int diag210(struct diag210 *addr);
extern void wait_cons_dev(void);
#endif
#endif

View File

@@ -0,0 +1,98 @@
#ifndef S390_CMB_H
#define S390_CMB_H
/**
 * struct cmbdata -- channel measurement block data for user space
 *
 * @size: size of the stored data
 * @elapsed_time: time since the measurement was enabled
 * @ssch_rsch_count: number of ssch/rsch operations -- inherited
 *	placeholder description, verify against the cmf implementation
 * @sample_count: number of samples taken
 * @device_connect_time: accumulated device-connect time
 * @function_pending_time: accumulated function-pending time
 * @device_disconnect_time: accumulated device-disconnect time
 * @control_unit_queuing_time: accumulated control-unit-queuing time
 * @device_active_only_time: accumulated device-active-only time
 * @device_busy_time: accumulated device-busy time (extended format only)
 * @initial_command_response_time: accumulated initial-command-response
 *	time (extended format only)
 *
 * all values are stored as 64 bit for simplicity, especially
 * in 32 bit emulation mode. All time values are normalized to
 * nanoseconds.
 * Currently, two formats are known, which differ by the size of
 * this structure, i.e. the last two members are only set when
 * the extended channel measurement facility (first shipped in
 * z990 machines) is activated.
 * Potentially, more fields could be added, which results in a
 * new ioctl number.
 **/
struct cmbdata {
	__u64 size;
	__u64 elapsed_time;
	/* basic and extended format: */
	__u64 ssch_rsch_count;
	__u64 sample_count;
	__u64 device_connect_time;
	__u64 function_pending_time;
	__u64 device_disconnect_time;
	__u64 control_unit_queuing_time;
	__u64 device_active_only_time;
	/* extended format only: */
	__u64 device_busy_time;
	__u64 initial_command_response_time;
};
/* enable channel measurement */
#define BIODASDCMFENABLE _IO(DASD_IOCTL_LETTER,32)
/* enable channel measurement */
#define BIODASDCMFDISABLE _IO(DASD_IOCTL_LETTER,33)
/* reset channel measurement block */
#define BIODASDRESETCMB _IO(DASD_IOCTL_LETTER,34)
/* read channel measurement data */
#define BIODASDREADCMB _IOWR(DASD_IOCTL_LETTER,32,u64)
/* read channel measurement data */
#define BIODASDREADALLCMB _IOWR(DASD_IOCTL_LETTER,33,struct cmbdata)
#ifdef __KERNEL__
/**
* enable_cmf() - switch on the channel measurement for a specific device
* @cdev: The ccw device to be enabled
* returns 0 for success or a negative error value.
*
* Context:
* non-atomic
**/
extern int enable_cmf(struct ccw_device *cdev);
/**
* disable_cmf() - switch off the channel measurement for a specific device
* @cdev: The ccw device to be disabled
* returns 0 for success or a negative error value.
*
* Context:
* non-atomic
**/
extern int disable_cmf(struct ccw_device *cdev);
/**
* cmf_read() - read one value from the current channel measurement block
* @cmf: the channel to be read
* @index: the name of the value that is read
*
* Context:
* any
**/
extern u64 cmf_read(struct ccw_device *cdev, int index);
/**
* cmf_readall() - read one value from the current channel measurement block
* @cmf: the channel to be read
* @data: a pointer to a data block that will be filled
*
* Context:
* any
**/
extern int cmf_readall(struct ccw_device *cdev, struct cmbdata*data);
extern void cmf_reset(struct ccw_device *cdev);
#endif /* __KERNEL__ */
#endif /* S390_CMB_H */

View File

@@ -0,0 +1,198 @@
#ifndef _ASM_S390X_COMPAT_H
#define _ASM_S390X_COMPAT_H
/*
* Architecture specific compatibility types
*/
#include <linux/types.h>
#include <linux/sched.h>
#define COMPAT_USER_HZ 100
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_time_t;
typedef s32 compat_clock_t;
typedef s32 compat_pid_t;
typedef u16 compat_uid_t;
typedef u16 compat_gid_t;
typedef u32 compat_uid32_t;
typedef u32 compat_gid32_t;
typedef u16 compat_mode_t;
typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
typedef s32 compat_off_t;
typedef s64 compat_loff_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
typedef s32 compat_key_t;
typedef s32 compat_timer_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
/* 31-bit user-space layout of struct timespec. */
struct compat_timespec {
	compat_time_t tv_sec;
	s32 tv_nsec;
};
/* 31-bit user-space layout of struct timeval. */
struct compat_timeval {
	compat_time_t tv_sec;
	s32 tv_usec;
};
/*
 * 31-bit user-space layout of struct stat; field sizes and the
 * explicit pads must match the 31-bit ABI exactly.
 */
struct compat_stat {
	compat_dev_t st_dev;
	u16 __pad1; /* explicit ABI padding */
	compat_ino_t st_ino;
	compat_mode_t st_mode;
	compat_nlink_t st_nlink;
	compat_uid_t st_uid;
	compat_gid_t st_gid;
	compat_dev_t st_rdev;
	u16 __pad2; /* explicit ABI padding */
	u32 st_size;
	u32 st_blksize;
	u32 st_blocks;
	u32 st_atime;
	u32 st_atime_nsec;
	u32 st_mtime;
	u32 st_mtime_nsec;
	u32 st_ctime;
	u32 st_ctime_nsec;
	u32 __unused4;
	u32 __unused5;
};
/* 31-bit struct flock with 32-bit file offsets (see compat_flock64). */
struct compat_flock {
	short l_type;
	short l_whence;
	compat_off_t l_start;
	compat_off_t l_len;
	compat_pid_t l_pid;
};
#define F_GETLK64 12
#define F_SETLK64 13
#define F_SETLKW64 14
/*
 * 31-bit struct flock64: 64-bit file offsets, used by the
 * F_GETLK64/F_SETLK64/F_SETLKW64 fcntl commands defined above.
 */
struct compat_flock64 {
	short l_type;
	short l_whence;
	compat_loff_t l_start;
	compat_loff_t l_len;
	compat_pid_t l_pid;
};
/* 31-bit user-space layout of struct statfs. */
struct compat_statfs {
	s32 f_type;
	s32 f_bsize;
	s32 f_blocks;
	s32 f_bfree;
	s32 f_bavail;
	s32 f_files;
	s32 f_ffree;
	compat_fsid_t f_fsid;
	s32 f_namelen;
	s32 f_frsize;
	s32 f_spare[6];
};
#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
#define COMPAT_RLIM_INFINITY 0xffffffff
typedef u32 compat_old_sigset_t; /* at least 32 bits */
#define _COMPAT_NSIG 64
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
/*
* A pointer passed in from user mode. This should not
* be used for syscall parameters, just declare them
* as pointers because the syscall entry code will have
 * appropriately converted them already.
*/
typedef u32 compat_uptr_t;
/*
 * Convert a user-supplied 32-bit pointer value into a kernel
 * void __user pointer; the top bit is masked off since 31-bit
 * addresses use only the low 31 bits.
 */
static inline void __user *compat_ptr(compat_uptr_t uptr)
{
	unsigned long addr = uptr & 0x7fffffffUL;

	return (void __user *) addr;
}
/*
 * Reserve len bytes of scratch space on the current task's user
 * stack (just below the saved user stack pointer).  For 31-bit
 * tasks the stack address is first masked down to 31 bits so the
 * result stays inside the emulated address space.
 */
static inline void __user *compat_alloc_user_space(long len)
{
	unsigned long stack;
	stack = KSTK_ESP(current);
	if (test_thread_flag(TIF_31BIT))
		stack &= 0x7fffffffUL;
	return (void __user *) (stack - len);
}
/*
 * 31-bit layout of struct ipc64_perm; embedded by the compat
 * sem/msg/shm structures below.
 */
struct compat_ipc64_perm {
	compat_key_t key;
	compat_uid32_t uid;
	compat_gid32_t gid;
	compat_uid32_t cuid;
	compat_gid32_t cgid;
	compat_mode_t mode;
	unsigned short __pad1; /* explicit ABI padding */
	unsigned short seq;
	unsigned short __pad2; /* explicit ABI padding */
	unsigned int __unused1;
	unsigned int __unused2;
};
/* 31-bit layout of struct semid64_ds. */
struct compat_semid64_ds {
	struct compat_ipc64_perm sem_perm;
	compat_time_t sem_otime;
	compat_ulong_t __pad1; /* pad after 32-bit time field */
	compat_time_t sem_ctime;
	compat_ulong_t __pad2; /* pad after 32-bit time field */
	compat_ulong_t sem_nsems;
	compat_ulong_t __unused1;
	compat_ulong_t __unused2;
};
/* 31-bit layout of struct msqid64_ds. */
struct compat_msqid64_ds {
	struct compat_ipc64_perm msg_perm;
	compat_time_t msg_stime;
	compat_ulong_t __pad1; /* pad after 32-bit time field */
	compat_time_t msg_rtime;
	compat_ulong_t __pad2; /* pad after 32-bit time field */
	compat_time_t msg_ctime;
	compat_ulong_t __pad3; /* pad after 32-bit time field */
	compat_ulong_t msg_cbytes;
	compat_ulong_t msg_qnum;
	compat_ulong_t msg_qbytes;
	compat_pid_t msg_lspid;
	compat_pid_t msg_lrpid;
	compat_ulong_t __unused1;
	compat_ulong_t __unused2;
};
/* 31-bit layout of struct shmid64_ds. */
struct compat_shmid64_ds {
	struct compat_ipc64_perm shm_perm;
	compat_size_t shm_segsz;
	compat_time_t shm_atime;
	compat_ulong_t __pad1; /* pad after 32-bit time field */
	compat_time_t shm_dtime;
	compat_ulong_t __pad2; /* pad after 32-bit time field */
	compat_time_t shm_ctime;
	compat_ulong_t __pad3; /* pad after 32-bit time field */
	compat_pid_t shm_cpid;
	compat_pid_t shm_lpid;
	compat_ulong_t shm_nattch;
	compat_ulong_t __unused1;
	compat_ulong_t __unused2;
};
#endif /* _ASM_S390X_COMPAT_H */

View File

@@ -0,0 +1,14 @@
/*
* arch/s390/kernel/cpcmd.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
#ifndef __CPCMD__
#define __CPCMD__
extern void cpcmd(char *cmd, char *response, int rlen);
#endif

View File

@@ -0,0 +1,23 @@
/*
* include/asm-s390/current.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/current.h"
*/
#ifndef _S390_CURRENT_H
#define _S390_CURRENT_H
#ifdef __KERNEL__
#include <asm/lowcore.h>
struct task_struct;
#define current ((struct task_struct *const)S390_lowcore.current_task)
#endif
#endif /* !(_S390_CURRENT_H) */

View File

@@ -0,0 +1,268 @@
/*
* File...........: linux/drivers/s390/block/dasd.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
*
* This file is the interface of the DASD device driver, which is exported to user space
* any future changes wrt the API will result in a change of the APIVERSION reported
* to userspace by the DASDAPIVER-ioctl
*
* $Revision: 1.6 $
*
*/
#ifndef DASD_H
#define DASD_H
#include <linux/ioctl.h>
#define DASD_IOCTL_LETTER 'D'
#define DASD_API_VERSION 6
/*
* struct dasd_information2_t
* represents any data about the device, which is visible to userspace.
 * including format and features.
*/
/*
 * Extended device information returned to user space by the
 * BIODASDINFO2 ioctl; layout is part of the user-space ABI
 * (DASD_API_VERSION 6).  Superset of dasd_information_t below.
 */
typedef struct dasd_information2_t {
	unsigned int devno; /* S/390 devno */
	unsigned int real_devno; /* for aliases */
	unsigned int schid; /* S/390 subchannel identifier */
	unsigned int cu_type : 16; /* from SenseID */
	unsigned int cu_model : 8; /* from SenseID */
	unsigned int dev_type : 16; /* from SenseID */
	unsigned int dev_model : 8; /* from SenseID */
	unsigned int open_count;
	unsigned int req_queue_len;
	unsigned int chanq_len; /* length of chanq */
	char type[4]; /* from discipline.name, 'none' for unknown */
	unsigned int status; /* current device level */
	unsigned int label_block; /* where to find the VOLSER */
	unsigned int FBA_layout; /* fixed block size (like AIXVOL) */
	unsigned int characteristics_size;
	unsigned int confdata_size;
	char characteristics[64]; /* from read_device_characteristics */
	char configuration_data[256]; /* from read_configuration_data */
	unsigned int format; /* format info, see DASD_FORMAT_* below */
	unsigned int features; /* dasd features, see DASD_FEATURE_* below */
	unsigned int reserved0; /* reserved for further use ,... */
	unsigned int reserved1; /* reserved for further use ,... */
	unsigned int reserved2; /* reserved for further use ,... */
	unsigned int reserved3; /* reserved for further use ,... */
	unsigned int reserved4; /* reserved for further use ,... */
	unsigned int reserved5; /* reserved for further use ,... */
	unsigned int reserved6; /* reserved for further use ,... */
	unsigned int reserved7; /* reserved for further use ,... */
} dasd_information2_t;
/*
* values to be used for dasd_information_t.format
* 0x00: NOT formatted
* 0x01: Linux disc layout
* 0x02: Common disc layout
*/
#define DASD_FORMAT_NONE 0
#define DASD_FORMAT_LDL 1
#define DASD_FORMAT_CDL 2
/*
* values to be used for dasd_information_t.features
* 0x00: default features
* 0x01: readonly (ro)
* 0x02: use diag discipline (diag)
*/
#define DASD_FEATURE_DEFAULT 0
#define DASD_FEATURE_READONLY 1
#define DASD_FEATURE_USEDIAG 2
#define DASD_PARTN_BITS 2
/*
* struct dasd_information_t
 * represents any data about the device, which is visible to userspace
*/
/*
 * Original (shorter) device information block, returned by the
 * BIODASDINFO ioctl; a prefix of dasd_information2_t above.
 */
typedef struct dasd_information_t {
	unsigned int devno; /* S/390 devno */
	unsigned int real_devno; /* for aliases */
	unsigned int schid; /* S/390 subchannel identifier */
	unsigned int cu_type : 16; /* from SenseID */
	unsigned int cu_model : 8; /* from SenseID */
	unsigned int dev_type : 16; /* from SenseID */
	unsigned int dev_model : 8; /* from SenseID */
	unsigned int open_count;
	unsigned int req_queue_len;
	unsigned int chanq_len; /* length of chanq */
	char type[4]; /* from discipline.name, 'none' for unknown */
	unsigned int status; /* current device level */
	unsigned int label_block; /* where to find the VOLSER */
	unsigned int FBA_layout; /* fixed block size (like AIXVOL) */
	unsigned int characteristics_size;
	unsigned int confdata_size;
	char characteristics[64]; /* from read_device_characteristics */
	char configuration_data[256]; /* from read_configuration_data */
} dasd_information_t;
/*
 * Read Subsystem Data - Performance Statistics
*/
/*
 * Performance statistics block returned by the BIODASDPSRD ioctl
 * (Read Subsystem Data).  Misspelled field names ("nr_bybass_cache",
 * "reseved2") are kept as-is: they are part of the user-space ABI.
 */
typedef struct dasd_rssd_perf_stats_t {
	unsigned char invalid:1;
	unsigned char format:3;
	unsigned char data_format:4;
	unsigned char unit_address;
	unsigned short device_status;
	unsigned int nr_read_normal;
	unsigned int nr_read_normal_hits;
	unsigned int nr_write_normal;
	unsigned int nr_write_fast_normal_hits;
	unsigned int nr_read_seq;
	unsigned int nr_read_seq_hits;
	unsigned int nr_write_seq;
	unsigned int nr_write_fast_seq_hits;
	unsigned int nr_read_cache;
	unsigned int nr_read_cache_hits;
	unsigned int nr_write_cache;
	unsigned int nr_write_fast_cache_hits;
	unsigned int nr_inhibit_cache;
	unsigned int nr_bybass_cache; /* sic: bypass-cache counter */
	unsigned int nr_seq_dasd_to_cache;
	unsigned int nr_dasd_to_cache;
	unsigned int nr_cache_to_dasd;
	unsigned int nr_delayed_fast_write;
	unsigned int nr_normal_fast_write;
	unsigned int nr_seq_fast_write;
	unsigned int nr_cache_miss;
	unsigned char status2;
	unsigned int nr_quick_write_promotes;
	unsigned char reserved;
	unsigned short ssid;
	unsigned char reseved2[96]; /* sic: reserved tail */
} __attribute__((packed)) dasd_rssd_perf_stats_t;
/*
* struct profile_info_t
 * holds the profiling information
*/
/*
 * Profiling counters returned by the BIODASDPRRD ioctl; the 32-bucket
 * arrays are histograms (bucket scaling not visible here -- verify
 * against the driver's profiling code).
 */
typedef struct dasd_profile_info_t {
	unsigned int dasd_io_reqs; /* number of requests processed at all */
	unsigned int dasd_io_sects; /* number of sectors processed at all */
	unsigned int dasd_io_secs[32]; /* histogram of request's sizes */
	unsigned int dasd_io_times[32]; /* histogram of requests's times */
	unsigned int dasd_io_timps[32]; /* histogram of requests's times per sector */
	unsigned int dasd_io_time1[32]; /* histogram of time from build to start */
	unsigned int dasd_io_time2[32]; /* histogram of time from start to irq */
	unsigned int dasd_io_time2ps[32]; /* histogram of time from start to irq */
	unsigned int dasd_io_time3[32]; /* histogram of time from irq to end */
	unsigned int dasd_io_nr_req[32]; /* histogram of # of requests in chanq */
} dasd_profile_info_t;
/*
* struct format_data_t
* represents all data necessary to format a dasd
*/
/*
 * Parameters for the BIODASDFMT format ioctl; intensity is a bit
 * mask built from the DASD_FMT_INT_* values defined below.
 */
typedef struct format_data_t {
	int start_unit; /* from track */
	int stop_unit; /* to track */
	int blksize; /* sectorsize */
	int intensity; /* DASD_FMT_INT_* flags */
} format_data_t;
/*
* values to be used for format_data_t.intensity
* 0/8: normal format
* 1/9: also write record zero
* 3/11: also write home address
* 4/12: invalidate track
*/
#define DASD_FMT_INT_FMT_R0 1 /* write record zero */
#define DASD_FMT_INT_FMT_HA 2 /* write home address, also set FMT_R0 ! */
#define DASD_FMT_INT_INVAL 4 /* invalidate tracks */
#define DASD_FMT_INT_COMPAT 8 /* use OS/390 compatible disk layout */
/*
* struct attrib_data_t
* represents the operation (cache) bits for the device.
* Used in DE to influence caching of the DASD.
*/
/*
 * Cache attributes exchanged via BIODASDGATTR/BIODASDSATTR;
 * operation takes one of the DASD_*_CACHE/DASD_SEQ_* values below.
 */
typedef struct attrib_data_t {
	unsigned char operation:3; /* cache operation mode */
	unsigned char reserved:5; /* unused */
	__u16 nr_cyl; /* number of cylinders for read ahead */
	__u8 reserved2[29]; /* for future use */
} __attribute__ ((packed)) attrib_data_t;
/* definition of operation (cache) bits within attributes of DE */
#define DASD_NORMAL_CACHE 0x0
#define DASD_BYPASS_CACHE 0x1
#define DASD_INHIBIT_LOAD 0x2
#define DASD_SEQ_ACCESS 0x3
#define DASD_SEQ_PRESTAGE 0x4
#define DASD_REC_ACCESS 0x5
/********************************************************************************
* SECTION: Definition of IOCTLs
*
 * Here is how the ioctl-nr should be used:
* 0 - 31 DASD driver itself
* 32 - 239 still open
* 240 - 255 reserved for EMC
*******************************************************************************/
/* Disable the volume (for Linux) */
#define BIODASDDISABLE _IO(DASD_IOCTL_LETTER,0)
/* Enable the volume (for Linux) */
#define BIODASDENABLE _IO(DASD_IOCTL_LETTER,1)
/* Issue a reserve/release command, rsp. */
#define BIODASDRSRV _IO(DASD_IOCTL_LETTER,2) /* reserve */
#define BIODASDRLSE _IO(DASD_IOCTL_LETTER,3) /* release */
#define BIODASDSLCK _IO(DASD_IOCTL_LETTER,4) /* steal lock */
/* reset profiling information of a device */
#define BIODASDPRRST _IO(DASD_IOCTL_LETTER,5)
/* Quiesce IO on device */
#define BIODASDQUIESCE _IO(DASD_IOCTL_LETTER,6)
/* Resume IO on device */
#define BIODASDRESUME _IO(DASD_IOCTL_LETTER,7)
/* retrieve API version number */
#define DASDAPIVER _IOR(DASD_IOCTL_LETTER,0,int)
/* Get information on a dasd device */
#define BIODASDINFO _IOR(DASD_IOCTL_LETTER,1,dasd_information_t)
/* retrieve profiling information of a device */
#define BIODASDPRRD _IOR(DASD_IOCTL_LETTER,2,dasd_profile_info_t)
/* Get information on a dasd device (enhanced) */
#define BIODASDINFO2 _IOR(DASD_IOCTL_LETTER,3,dasd_information2_t)
/* Performance Statistics Read */
#define BIODASDPSRD _IOR(DASD_IOCTL_LETTER,4,dasd_rssd_perf_stats_t)
/* Get Attributes (cache operations) */
#define BIODASDGATTR _IOR(DASD_IOCTL_LETTER,5,attrib_data_t)
/* #define BIODASDFORMAT _IOW(IOCTL_LETTER,0,format_data_t) , deprecated */
#define BIODASDFMT _IOW(DASD_IOCTL_LETTER,1,format_data_t)
/* Set Attributes (cache operations) */
#define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t)
#endif /* DASD_H */
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
* adjust the settings for this buffer only. This must remain at the end
* of the file.
* ---------------------------------------------------------------------------
* Local variables:
* c-indent-level: 4
* c-brace-imaginary-offset: 0
* c-brace-offset: -4
* c-argdecl-indent: 4
* c-label-offset: -4
* c-continued-statement-offset: 4
* c-continued-brace-offset: 0
* indent-tabs-mode: nil
* tab-width: 8
* End:
*/

View File

@@ -0,0 +1,249 @@
/*
* include/asm-s390/debug.h
* S/390 debug facility
*
* Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
*/
#ifndef DEBUG_H
#define DEBUG_H
#include <linux/string.h>
/* Note:
* struct __debug_entry must be defined outside of #ifdef __KERNEL__
* in order to allow a user program to analyze the 'raw'-view.
*/
/*
 * One trace-entry header: 64-bit id word (timestamp plus flags)
 * followed by the caller address; the event payload is stored
 * directly behind the entry (see DEBUG_DATA below).
 */
struct __debug_entry{
	union {
		struct {
			unsigned long long clock:52; /* timestamp (store-clock value, truncated) */
			unsigned long long exception:1; /* set for *_exception events */
			unsigned long long level:3; /* debug level of the event */
			unsigned long long cpuid:8; /* CPU that logged the event */
		} fields;
		unsigned long long stck; /* raw 64-bit id word */
	} id;
	void* caller; /* return address of the logging caller */
} __attribute__((packed));
#define __DEBUG_FEATURE_VERSION 1 /* version of debug feature */
#ifdef __KERNEL__
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */
#define DEBUG_OFF_LEVEL -1 /* level where debug is switched off */
#define DEBUG_FLUSH_ALL -1 /* parameter to flush all areas */
#define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */
#define DEBUG_MAX_PROCF_LEN 16 /* max length for a proc file name */
#define DEBUG_DEFAULT_LEVEL 3 /* initial debug level */
#define DEBUG_DIR_ROOT "s390dbf" /* name of debug root directory in proc fs */
#define DEBUG_DATA(entry) (char*)(entry + 1) /* data is stored behind */
/* the entry information */
#define STCK(x) asm volatile ("STCK 0(%1)" : "=m" (x) : "a" (&(x)) : "cc")
typedef struct __debug_entry debug_entry_t;
struct debug_view;
/*
 * One registered debug log: its trace areas plus the proc-fs views
 * under /proc/s390dbf/<name> through which they are exported.
 */
typedef struct debug_info {
	struct debug_info* next; /* list of all registered logs */
	struct debug_info* prev;
	atomic_t ref_count; /* active users of this log */
	spinlock_t lock;
	int level; /* current level; events above it are dropped */
	int nr_areas; /* number of trace areas */
	int page_order; /* allocation order of each area -- TODO confirm */
	int buf_size;
	int entry_size; /* bytes per entry including header */
	debug_entry_t** areas; /* the trace areas themselves */
	int active_area; /* area currently being written */
	int *active_entry; /* per-area write positions -- TODO confirm */
	struct proc_dir_entry* proc_root_entry; /* /proc/s390dbf/<name> */
	struct proc_dir_entry* proc_entries[DEBUG_MAX_VIEWS];
	struct debug_view* views[DEBUG_MAX_VIEWS];
	char name[DEBUG_MAX_PROCF_LEN]; /* directory name in proc fs */
} debug_info_t;
typedef int (debug_header_proc_t) (debug_info_t* id,
struct debug_view* view,
int area,
debug_entry_t* entry,
char* out_buf);
typedef int (debug_format_proc_t) (debug_info_t* id,
struct debug_view* view, char* out_buf,
const char* in_buf);
typedef int (debug_prolog_proc_t) (debug_info_t* id,
struct debug_view* view,
char* out_buf);
typedef int (debug_input_proc_t) (debug_info_t* id,
struct debug_view* view,
struct file* file,
const char __user *user_buf,
size_t in_buf_size, loff_t* offset);
int debug_dflt_header_fn(debug_info_t* id, struct debug_view* view,
int area, debug_entry_t* entry, char* out_buf);
/*
 * Formatting hooks for one proc-fs presentation of a debug log
 * (e.g. the hex_ascii, raw and sprintf views declared below).
 */
struct debug_view {
	char name[DEBUG_MAX_PROCF_LEN]; /* proc file name */
	debug_prolog_proc_t* prolog_proc; /* emits leading text of the file */
	debug_header_proc_t* header_proc; /* formats an entry's header */
	debug_format_proc_t* format_proc; /* formats an entry's payload */
	debug_input_proc_t* input_proc; /* handles writes to the proc file */
	void* private_data;
};
extern struct debug_view debug_hex_ascii_view;
extern struct debug_view debug_raw_view;
extern struct debug_view debug_sprintf_view;
/* do NOT use the _common functions */
debug_entry_t* debug_event_common(debug_info_t* id, int level,
const void* data, int length);
debug_entry_t* debug_exception_common(debug_info_t* id, int level,
const void* data, int length);
/* Debug Feature API: */
debug_info_t* debug_register(char* name, int pages_index, int nr_areas,
int buf_size);
void debug_unregister(debug_info_t* id);
void debug_set_level(debug_info_t* id, int new_level);
void debug_stop_all(void);
/*
 * Log a raw-data event into "id" if it exists and "level" is enabled.
 * Returns the new entry, or NULL when the event is filtered out.
 */
extern inline debug_entry_t*
debug_event(debug_info_t* id, int level, void* data, int length)
{
	if (id == NULL || level > id->level)
		return NULL;
	return debug_event_common(id, level, data, length);
}
/*
 * Log a single unsigned int as event payload; NULL when filtered.
 */
extern inline debug_entry_t*
debug_int_event(debug_info_t* id, int level, unsigned int tag)
{
	unsigned int value = tag;

	if (id == NULL || level > id->level)
		return NULL;
	return debug_event_common(id, level, &value, sizeof(value));
}
/*
 * Log a single unsigned long as event payload; NULL when filtered.
 */
extern inline debug_entry_t *
debug_long_event (debug_info_t* id, int level, unsigned long tag)
{
	unsigned long value = tag;

	if (id == NULL || level > id->level)
		return NULL;
	return debug_event_common(id, level, &value, sizeof(value));
}
/*
 * Log a NUL-terminated string (without the terminator) as an event;
 * NULL when filtered.
 */
extern inline debug_entry_t*
debug_text_event(debug_info_t* id, int level, const char* txt)
{
	if (id == NULL || level > id->level)
		return NULL;
	return debug_event_common(id, level, txt, strlen(txt));
}
extern debug_entry_t *
debug_sprintf_event(debug_info_t* id,int level,char *string,...)
__attribute__ ((format(printf, 3, 4)));
/*
 * Like debug_event() but records the entry as an exception
 * (sets the exception bit in the entry header).
 */
extern inline debug_entry_t*
debug_exception(debug_info_t* id, int level, void* data, int length)
{
	if (id == NULL || level > id->level)
		return NULL;
	return debug_exception_common(id, level, data, length);
}
/*
 * Exception variant of debug_int_event(); NULL when filtered.
 */
extern inline debug_entry_t*
debug_int_exception(debug_info_t* id, int level, unsigned int tag)
{
	unsigned int value = tag;

	if (id == NULL || level > id->level)
		return NULL;
	return debug_exception_common(id, level, &value, sizeof(value));
}
/*
 * Exception variant of debug_long_event(); NULL when filtered.
 */
extern inline debug_entry_t *
debug_long_exception (debug_info_t* id, int level, unsigned long tag)
{
	unsigned long value = tag;

	if (id == NULL || level > id->level)
		return NULL;
	return debug_exception_common(id, level, &value, sizeof(value));
}
/*
 * Exception variant of debug_text_event(); NULL when filtered.
 */
extern inline debug_entry_t*
debug_text_exception(debug_info_t* id, int level, const char* txt)
{
	if (id == NULL || level > id->level)
		return NULL;
	return debug_exception_common(id, level, txt, strlen(txt));
}
extern debug_entry_t *
debug_sprintf_exception(debug_info_t* id,int level,char *string,...)
__attribute__ ((format(printf, 3, 4)));
int debug_register_view(debug_info_t* id, struct debug_view* view);
int debug_unregister_view(debug_info_t* id, struct debug_view* view);
/*
define the debug levels:
- 0 No debugging output to console or syslog
- 1 Log internal errors to syslog, ignore check conditions
- 2 Log internal errors and check conditions to syslog
- 3 Log internal errors to console, log check conditions to syslog
- 4 Log internal errors and check conditions to console
- 5 panic on internal errors, log check conditions to console
- 6 panic on both, internal errors and check conditions
*/
#ifndef DEBUG_LEVEL
#define DEBUG_LEVEL 4
#endif
#define INTERNAL_ERRMSG(x,y...) "E" __FILE__ "%d: " x, __LINE__, y
#define INTERNAL_WRNMSG(x,y...) "W" __FILE__ "%d: " x, __LINE__, y
#define INTERNAL_INFMSG(x,y...) "I" __FILE__ "%d: " x, __LINE__, y
#define INTERNAL_DEBMSG(x,y...) "D" __FILE__ "%d: " x, __LINE__, y
#if DEBUG_LEVEL > 0
#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
#define PRINT_INFO(x...) printk ( KERN_INFO PRINTK_HEADER x )
#define PRINT_WARN(x...) printk ( KERN_WARNING PRINTK_HEADER x )
#define PRINT_ERR(x...) printk ( KERN_ERR PRINTK_HEADER x )
#define PRINT_FATAL(x...) panic ( PRINTK_HEADER x )
#else
#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
#define PRINT_INFO(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
#define PRINT_WARN(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
#define PRINT_ERR(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
#define PRINT_FATAL(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
#endif /* DASD_DEBUG */
#undef DEBUG_MALLOC
#ifdef DEBUG_MALLOC
void *b;
#define kmalloc(x...) (PRINT_INFO(" kmalloc %p\n",b=kmalloc(x)),b)
#define kfree(x) PRINT_INFO(" kfree %p\n",x);kfree(x)
#define get_zeroed_page(x...) (PRINT_INFO(" gfp %p\n",b=get_zeroed_page(x)),b)
#define __get_free_pages(x...) (PRINT_INFO(" gfps %p\n",b=__get_free_pages(x)),b)
#endif /* DEBUG_MALLOC */
#endif /* __KERNEL__ */
#endif /* DEBUG_H */

View File

@@ -0,0 +1,22 @@
/*
* include/asm-s390/delay.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/delay.h"
* Copyright (C) 1993 Linus Torvalds
*
* Delay routines calling functions in arch/s390/lib/delay.c
*/
#ifndef _S390_DELAY_H
#define _S390_DELAY_H
extern void __udelay(unsigned long usecs);
extern void __delay(unsigned long loops);
#define udelay(n) __udelay(n)
#endif /* defined(_S390_DELAY_H) */

View File

@@ -0,0 +1,49 @@
#ifndef __S390_DIV64
#define __S390_DIV64
#ifndef __s390x__
/* for do_div "base" needs to be smaller than 2^31-1 */
#define do_div(n, base) ({ \
unsigned long long __n = (n); \
unsigned long __r; \
\
asm (" slr 0,0\n" \
" l 1,%1\n" \
" srdl 0,1\n" \
" dr 0,%2\n" \
" alr 1,1\n" \
" alr 0,0\n" \
" lhi 2,1\n" \
" n 2,%1\n" \
" alr 0,2\n" \
" clr 0,%2\n" \
" jl 0f\n" \
" slr 0,%2\n" \
" ahi 1,1\n" \
"0: st 1,%1\n" \
" l 1,4+%1\n" \
" srdl 0,1\n" \
" dr 0,%2\n" \
" alr 1,1\n" \
" alr 0,0\n" \
" lhi 2,1\n" \
" n 2,4+%1\n" \
" alr 0,2\n" \
" clr 0,%2\n" \
" jl 1f\n" \
" slr 0,%2\n" \
" ahi 1,1\n" \
"1: st 1,4+%1\n" \
" lr %0,0" \
: "=d" (__r), "=m" (__n) \
: "d" (base), "m" (__n) : "0", "1", "2", "cc" ); \
(n) = (__n); \
__r; \
})
#else /* __s390x__ */
#include <asm-generic/div64.h>
#endif /* __s390x__ */
#endif

View File

@@ -0,0 +1,14 @@
/*
* include/asm-s390/dma-mapping.h
*
* S390 version
*
* This file exists so that #include <dma-mapping.h> doesn't break anything.
*/
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#include <asm-generic/dma-mapping-broken.h>
#endif /* _ASM_DMA_MAPPING_H */

View File

@@ -0,0 +1,16 @@
/*
* include/asm-s390/dma.h
*
* S390 version
*/
#ifndef _ASM_DMA_H
#define _ASM_DMA_H
#include <asm/io.h> /* need byte IO */
#define MAX_DMA_ADDRESS 0x80000000
#define free_dma(x)
#endif /* _ASM_DMA_H */

View File

@@ -0,0 +1,49 @@
/*
* include/asm-s390/ebcdic.h
* EBCDIC -> ASCII, ASCII -> EBCDIC conversion routines.
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef _EBCDIC_H
#define _EBCDIC_H
#ifndef _S390_TYPES_H
#include <types.h>
#endif
extern __u8 _ascebc_500[]; /* ASCII -> EBCDIC 500 conversion table */
extern __u8 _ebcasc_500[]; /* EBCDIC 500 -> ASCII conversion table */
extern __u8 _ascebc[]; /* ASCII -> EBCDIC conversion table */
extern __u8 _ebcasc[]; /* EBCDIC -> ASCII conversion table */
extern __u8 _ebc_tolower[]; /* EBCDIC -> lowercase */
extern __u8 _ebc_toupper[]; /* EBCDIC -> uppercase */
/*
 * Translate "nr" bytes at "addr" in place through the 256-byte
 * translation table "codepage": full 256-byte chunks use TR directly,
 * the remainder is handled by EX-ecuting a TR with the leftover
 * length.  Since nr is unsigned, the guard only catches nr == 0; the
 * decrement yields the length-minus-one value the EX form needs.
 */
extern __inline__ void
codepage_convert(const __u8 *codepage, volatile __u8 * addr, unsigned long nr)
{
	if (nr-- <= 0) /* unsigned: true only for nr == 0 */
		return;
	__asm__ __volatile__(
		" bras 1,1f\n"
		" tr 0(1,%0),0(%2)\n"
		"0: tr 0(256,%0),0(%2)\n"
		" la %0,256(%0)\n"
		"1: ahi %1,-256\n"
		" jnm 0b\n"
		" ex %1,0(1)"
		: "+&a" (addr), "+&a" (nr)
		: "a" (codepage) : "cc", "memory", "1" );
}
#define ASCEBC(addr,nr) codepage_convert(_ascebc, addr, nr)
#define EBCASC(addr,nr) codepage_convert(_ebcasc, addr, nr)
#define ASCEBC_500(addr,nr) codepage_convert(_ascebc_500, addr, nr)
#define EBCASC_500(addr,nr) codepage_convert(_ebcasc_500, addr, nr)
#define EBC_TOLOWER(addr,nr) codepage_convert(_ebc_tolower, addr, nr)
#define EBC_TOUPPER(addr,nr) codepage_convert(_ebc_toupper, addr, nr)
#endif

View File

@@ -0,0 +1,215 @@
/*
* include/asm-s390/elf.h
*
* S390 version
*
* Derived from "include/asm-i386/elf.h"
*/
#ifndef __ASMS390_ELF_H
#define __ASMS390_ELF_H
/* s390 relocations defined by the ABIs */
#define R_390_NONE 0 /* No reloc. */
#define R_390_8 1 /* Direct 8 bit. */
#define R_390_12 2 /* Direct 12 bit. */
#define R_390_16 3 /* Direct 16 bit. */
#define R_390_32 4 /* Direct 32 bit. */
#define R_390_PC32 5 /* PC relative 32 bit. */
#define R_390_GOT12 6 /* 12 bit GOT offset. */
#define R_390_GOT32 7 /* 32 bit GOT offset. */
#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */
#define R_390_COPY 9 /* Copy symbol at runtime. */
#define R_390_GLOB_DAT 10 /* Create GOT entry. */
#define R_390_JMP_SLOT 11 /* Create PLT entry. */
#define R_390_RELATIVE 12 /* Adjust by program base. */
#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */
#define R_390_GOTPC 14 /* 32 bit PC rel. offset to GOT. */
#define R_390_GOT16 15 /* 16 bit GOT offset. */
#define R_390_PC16 16 /* PC relative 16 bit. */
#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */
#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */
#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */
#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */
#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */
#define R_390_64 22 /* Direct 64 bit. */
#define R_390_PC64 23 /* PC relative 64 bit. */
#define R_390_GOT64 24 /* 64 bit GOT offset. */
#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */
#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */
#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */
#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */
#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */
#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */
#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */
#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */
#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */
#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */
#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */
#define R_390_PLTOFF64 36 /* 16 bit offset from GOT to PLT. */
#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */
#define R_390_TLS_GDCALL 38 /* Tag for function call in general
dynamic TLS code. */
#define R_390_TLS_LDCALL 39 /* Tag for function call in local
dynamic TLS code. */
#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic
thread local data. */
#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic
thread local data. */
#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS
block offset. */
#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS
block offset. */
#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS
block offset. */
#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic
thread local data in LD code. */
#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic
thread local data in LD code. */
#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for
negated static TLS block offset. */
#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for
negated static TLS block offset. */
#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for
negated static TLS block offset. */
#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to
static TLS block. */
#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to
static TLS block. */
#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS
block. */
#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS
block. */
#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */
#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */
#define R_390_TLS_TPOFF 56 /* Negate offset in static TLS
block. */
#define R_390_20 57 /* Direct 20 bit. */
#define R_390_GOT20 58 /* 20 bit GOT offset. */
#define R_390_GOTPLT20 59 /* 20 bit offset to jump slot. */
#define R_390_TLS_GOTIE20 60 /* 20 bit GOT offset for static TLS
block offset. */
/* Keep this the last entry. */
#define R_390_NUM 61
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
#include <asm/user.h>
#include <asm/system.h> /* for save_access_regs */
typedef s390_fp_regs elf_fpregset_t;
typedef s390_regs elf_gregset_t;
/*
* These are used to set parameters in the core dumps.
*/
#ifndef __s390x__
#define ELF_CLASS ELFCLASS32
#else /* __s390x__ */
#define ELF_CLASS ELFCLASS64
#endif /* __s390x__ */
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_S390
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) \
(((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
&& (x)->e_ident[EI_CLASS] == ELF_CLASS)
/* For SVR4/S390 the function pointer to be registered with `atexit` is
passed in R14. */
#define ELF_PLAT_INIT(_r, load_addr) \
do { \
_r->gprs[14] = 0; \
} while (0)
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#ifndef __s390x__
#define ELF_ET_DYN_BASE ((TASK_SIZE & 0x80000000) \
? TASK_SIZE / 3 * 2 \
: 2 * TASK_SIZE / 3)
#else /* __s390x__ */
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
#endif /* __s390x__ */
/* Wow, the "main" arch needs arch dependent functions too.. :) */
/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
now struct_user_regs, they are different) */
/*
 * Fill an ELF general register set from a pt_regs frame of the
 * current task.
 *
 * The psw and the general registers are copied with one memcpy,
 * which assumes that psw is immediately followed by gprs in both
 * pt_regs and elf_gregset_t (s390_regs) -- NOTE(review): holds for
 * the current layouts, verify if either struct changes.
 * The access registers are taken from the running CPU (not from
 * ptregs) and orig_gpr2 is copied separately.
 *
 * Always returns 1 to tell the ELF core dump code that the
 * registers have been provided.
 */
static inline int dump_regs(struct pt_regs *ptregs, elf_gregset_t *regs)
{
	memcpy(&regs->psw, &ptregs->psw, sizeof(regs->psw)+sizeof(regs->gprs));
	save_access_regs(regs->acrs);
	regs->orig_gpr2 = ptregs->orig_gpr2;
	return 1;
}
#define ELF_CORE_COPY_REGS(pr_reg, regs) dump_regs(regs, &pr_reg);
/*
 * Fill an ELF general register set for an arbitrary task.
 *
 * Like dump_regs(), psw and gprs are copied with a single memcpy
 * (relies on the two fields being adjacent in both structures).
 * Unlike dump_regs(), the access registers come from the task's
 * saved thread state rather than from the running CPU.
 *
 * Always returns 1 (registers were provided).
 */
static inline int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *ptregs = __KSTK_PTREGS(tsk);
	memcpy(&regs->psw, &ptregs->psw, sizeof(regs->psw)+sizeof(regs->gprs));
	memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs));
	regs->orig_gpr2 = ptregs->orig_gpr2;
	return 1;
}
#define ELF_CORE_COPY_TASK_REGS(tsk, regs) dump_task_regs(tsk, regs)
/*
 * Fill an ELF floating point register set for a task.
 *
 * The current task's FP state still lives in the hardware registers
 * and must be saved first; any other task's state is already held
 * in its thread structure and can simply be copied.
 *
 * Always returns 1 (registers were provided).
 */
static inline int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
	if (tsk != current)
		memcpy(fpregs, &tsk->thread.fp_regs, sizeof(elf_fpregset_t));
	else
		save_fp_regs(fpregs);
	return 1;
}
#define ELF_CORE_COPY_FPREGS(tsk, fpregs) dump_task_fpu(tsk, fpregs)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
#define ELF_HWCAP (0)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo.
For the moment, we have only optimizations for the Intel generations,
but that could change... */
#define ELF_PLATFORM (NULL)
#ifdef __KERNEL__
#ifndef __s390x__
#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
#else /* __s390x__ */
#define SET_PERSONALITY(ex, ibcs2) \
do { \
if (ibcs2) \
set_personality(PER_SVR4); \
else if (current->personality != PER_LINUX32) \
set_personality(PER_LINUX); \
clear_thread_flag(TIF_31BIT); \
} while (0)
#endif /* __s390x__ */
#endif
#endif

View File

@@ -0,0 +1,13 @@
/*
* include/asm-s390/errno.h
*
* S390 version
*
*/
#ifndef _S390_ERRNO_H
#define _S390_ERRNO_H
#include <asm-generic/errno.h>
#endif

View File

@@ -0,0 +1,32 @@
/*
* include/asm-s390x/extmem.h
*
* definitions for external memory segment support
* Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
*/
#ifndef _ASM_S390X_DCSS_H
#define _ASM_S390X_DCSS_H
#ifndef __ASSEMBLY__
/* possible values for segment type as returned by segment_info */
#define SEG_TYPE_SW 0
#define SEG_TYPE_EW 1
#define SEG_TYPE_SR 2
#define SEG_TYPE_ER 3
#define SEG_TYPE_SN 4
#define SEG_TYPE_EN 5
#define SEG_TYPE_SC 6
#define SEG_TYPE_EWEN 7
#define SEGMENT_SHARED 0
#define SEGMENT_EXCLUSIVE 1
extern int segment_load (char *name,int segtype,unsigned long *addr,unsigned long *length);
extern void segment_unload(char *name);
extern void segment_save(char *name);
extern int segment_type (char* name);
extern int segment_modify_shared (char *name, int do_nonshared);
#endif
#endif

View File

@@ -0,0 +1,97 @@
/*
* include/asm-s390/fcntl.h
*
* S390 version
*
* Derived from "include/asm-i386/fcntl.h"
*/
#ifndef _S390_FCNTL_H
#define _S390_FCNTL_H
/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
located on an ext2 file system */
#define O_ACCMODE 0003
#define O_RDONLY 00
#define O_WRONLY 01
#define O_RDWR 02
#define O_CREAT 0100 /* not fcntl */
#define O_EXCL 0200 /* not fcntl */
#define O_NOCTTY 0400 /* not fcntl */
#define O_TRUNC 01000 /* not fcntl */
#define O_APPEND 02000
#define O_NONBLOCK 04000
#define O_NDELAY O_NONBLOCK
#define O_SYNC 010000
#define FASYNC 020000 /* fcntl, for BSD compatibility */
#define O_DIRECT 040000 /* direct disk access hint */
#define O_LARGEFILE 0100000
#define O_DIRECTORY 0200000 /* must be a directory */
#define O_NOFOLLOW 0400000 /* don't follow links */
#define O_NOATIME 01000000
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
#define F_SETFD 2 /* set/clear close_on_exec */
#define F_GETFL 3 /* get file->f_flags */
#define F_SETFL 4 /* set file->f_flags */
#define F_GETLK 5
#define F_SETLK 6
#define F_SETLKW 7
#define F_SETOWN 8 /* for sockets. */
#define F_GETOWN 9 /* for sockets. */
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
#ifndef __s390x__
#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14
#endif /* ! __s390x__ */
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
/* for posix fcntl() and lockf() */
#define F_RDLCK 0
#define F_WRLCK 1
#define F_UNLCK 2
/* for old implementation of bsd flock () */
#define F_EXLCK 4 /* or 3 */
#define F_SHLCK 8 /* or 4 */
/* for leases */
#define F_INPROGRESS 16
/* operations for bsd flock(), also used by the kernel implementation */
#define LOCK_SH 1 /* shared lock */
#define LOCK_EX 2 /* exclusive lock */
#define LOCK_NB 4 /* or'd with one of the above to prevent
blocking */
#define LOCK_UN 8 /* remove lock */
#define LOCK_MAND 32 /* This is a mandatory flock */
#define LOCK_READ 64 /* ... Which allows concurrent read operations */
#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
/* Advisory record lock description for fcntl F_GETLK/F_SETLK/F_SETLKW. */
struct flock {
	short l_type;		/* F_RDLCK, F_WRLCK or F_UNLCK */
	short l_whence;		/* base that l_start is relative to */
	off_t l_start;		/* start offset of the locked region */
	off_t l_len;		/* length of the region in bytes */
	pid_t l_pid;		/* lock owner, as reported by F_GETLK */
};
#ifndef __s390x__
/* 64 bit offset variant for the F_GETLK64/F_SETLK64/F_SETLKW64
   commands; only needed in 31 bit mode where off_t is 32 bit. */
struct flock64 {
	short l_type;		/* as in struct flock */
	short l_whence;
	loff_t l_start;		/* 64 bit start offset */
	loff_t l_len;		/* 64 bit length */
	pid_t l_pid;
};
#endif
#define F_LINUX_SPECIFIC_BASE 1024
#endif

View File

@@ -0,0 +1,60 @@
/*
* include/asm-s390/hardirq.h
*
* S390 version
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
*
* Derived from "include/asm-i386/hardirq.h"
*/
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
#include <linux/config.h>
#include <linux/threads.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <asm/lowcore.h>
/* irq_cpustat_t is unused currently, but could be converted
* into a percpu variable instead of storing softirq_pending
* on the lowcore */
typedef struct {
unsigned int __softirq_pending;
} irq_cpustat_t;
#define local_softirq_pending() (S390_lowcore.softirq_pending)
/* this is always called with cpu == smp_processor_id() at the moment */
/*
 * Return the pending-softirq mask of a cpu.
 * The caller's own cpu is read through its lowcore directly; any
 * other cpu's lowcore is reached via the lowcore_ptr[] array.
 */
static inline __u32
softirq_pending(unsigned int cpu)
{
	if (cpu != smp_processor_id())
		return lowcore_ptr[cpu]->softirq_pending;
	return local_softirq_pending();
}
#define __ARCH_IRQ_STAT
#define HARDIRQ_BITS 8
extern void account_ticks(struct pt_regs *);
#define __ARCH_HAS_DO_SOFTIRQ
#define irq_enter() \
do { \
(preempt_count() += HARDIRQ_OFFSET); \
} while(0)
#define irq_exit() \
do { \
preempt_count() -= IRQ_EXIT_OFFSET; \
if (!in_interrupt() && local_softirq_pending()) \
/* Use the async. stack for softirq */ \
do_softirq(); \
preempt_enable_no_resched(); \
} while (0)
#endif /* __ASM_HARDIRQ_H */

View File

@@ -0,0 +1,257 @@
/*
* File...........: linux/include/asm-s390x/idals.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000a
* History of changes
* 07/24/00 new file
* 05/04/02 code restructuring.
*/
#ifndef _S390_IDALS_H
#define _S390_IDALS_H
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/cio.h>
#include <asm/uaccess.h>
#ifdef __s390x__
#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
#else
#define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */
#endif
#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)
/*
 * Test if an address/length pair needs an idal list.
 * Only the 64 bit kernel can hand out data above 2 GB; indirect
 * addressing is required as soon as the last byte of the area lies
 * beyond the 31 bit addressable limit.
 */
static inline int
idal_is_needed(void *vaddr, unsigned int length)
{
#ifdef __s390x__
	unsigned long last = __pa(vaddr) + length - 1;

	return (last >> 31) != 0;
#else
	return 0;
#endif
}
/*
 * Return the number of idal words needed for an address/length pair.
 * Zero if no indirect addressing is needed at all; otherwise one
 * word per IDA_BLOCK_SIZE block touched, including the partial
 * block at the start of the area.
 */
static inline unsigned int
idal_nr_words(void *vaddr, unsigned int length)
{
#ifdef __s390x__
	if (idal_is_needed(vaddr, length)) {
		unsigned long offset = __pa(vaddr) & (IDA_BLOCK_SIZE - 1);

		return (offset + length + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
	}
#endif
	return 0;
}
/*
 * Create the list of idal words for an address/length pair.
 *
 * The first idal word carries the untranslated start address
 * (including the offset into the first block); every following word
 * addresses the start of the next IDA_BLOCK_SIZE block. Returns a
 * pointer just past the last word written so several areas can be
 * chained into one list. On 31 bit kernels no idal is ever needed
 * and the list is left untouched.
 */
static inline unsigned long *
idal_create_words(unsigned long *idaws, void *vaddr, unsigned int length)
{
#ifdef __s390x__
	unsigned long paddr;
	unsigned int cidaw;

	paddr = __pa(vaddr);
	/* Words needed: partial first block + remaining length. */
	cidaw = ((paddr & (IDA_BLOCK_SIZE-1)) + length +
		 (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
	*idaws++ = paddr;
	/* All further words are block aligned. */
	paddr &= -IDA_BLOCK_SIZE;
	/*
	 * One word is already stored. The old "while (--cidaw > 0)"
	 * loop underflowed the unsigned counter when cidaw was 0
	 * (length == 0 on a block aligned address) and flooded memory
	 * with bogus idal words; this form is safe for cidaw <= 1.
	 */
	for (; cidaw > 1; cidaw--) {
		paddr += IDA_BLOCK_SIZE;
		*idaws++ = paddr;
	}
#endif
	return idaws;
}
/*
 * Set the data address of a ccw to vaddr.
 *
 * If the data needs indirect addressing (64 bit only, data above
 * 2 GB) an idal word list is allocated with GFP_ATOMIC from
 * DMA-capable memory, filled via idal_create_words() and the ccw is
 * flagged with CCW_FLAG_IDA; the cda then points at the list.
 *
 * Returns 0 on success, -EINVAL if the ccw already carries an idal
 * (re-setting it would leak/clobber the old list), -ENOMEM if the
 * list cannot be allocated. On success the caller owns any idal
 * allocation and must release it with clear_normalized_cda().
 */
static inline int
set_normalized_cda(struct ccw1 * ccw, void *vaddr)
{
#ifdef __s390x__
	unsigned int nridaws;
	unsigned long *idal;
	if (ccw->flags & CCW_FLAG_IDA)
		return -EINVAL;
	nridaws = idal_nr_words(vaddr, ccw->count);
	if (nridaws > 0) {
		idal = kmalloc(nridaws * sizeof(unsigned long),
			       GFP_ATOMIC | GFP_DMA );
		if (idal == NULL)
			return -ENOMEM;
		idal_create_words(idal, vaddr, ccw->count);
		ccw->flags |= CCW_FLAG_IDA;
		vaddr = idal;
	}
#endif
	ccw->cda = (__u32)(unsigned long) vaddr;
	return 0;
}
/*
 * Reset the data address of a ccw and release the idal word list
 * allocated by set_normalized_cda(), if there is one.
 */
static inline void
clear_normalized_cda(struct ccw1 * ccw)
{
#ifdef __s390x__
	if (ccw->flags & CCW_FLAG_IDA) {
		void *idal = (void *)(unsigned long) ccw->cda;

		ccw->flags &= ~CCW_FLAG_IDA;
		kfree(idal);
	}
#endif
	ccw->cda = 0;
}
/*
 * Idal buffer extension.
 * Describes a buffer assembled from several memory chunks of
 * 2^page_order pages each. data[] holds one pointer per
 * IDA_BLOCK_SIZE piece of the buffer, so the array can be handed
 * to the channel as an idal word list; every nr_chunks-th entry is
 * the base of a separate allocation (see idal_buffer_alloc).
 */
struct idal_buffer {
	size_t size;		/* total usable size in bytes */
	size_t page_order;	/* allocation order of each chunk */
	void *data[0];		/* per-block pointers (flexible array) */
};
/*
 * Allocate an idal buffer of "size" bytes built from chunks of
 * 2^page_order pages.
 *
 * Fills data[] with one pointer per IDA_BLOCK_SIZE piece; pointers
 * inside a chunk are derived from the previous entry, and a fresh
 * chunk is allocated whenever the index crosses a chunk boundary.
 *
 * Returns the buffer or ERR_PTR(-ENOMEM). On failure every chunk
 * allocated so far is rolled back before returning.
 */
static inline struct idal_buffer *
idal_buffer_alloc(size_t size, int page_order)
{
	struct idal_buffer *ib;
	int nr_chunks, nr_ptrs, i;
	/* One data pointer per idal block of the buffer... */
	nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
	/* ...and nr_chunks of those pointers share one allocation. */
	nr_chunks = (4096 << page_order) >> IDA_SIZE_LOG;
	ib = kmalloc(sizeof(struct idal_buffer) + nr_ptrs*sizeof(void *),
		     GFP_DMA | GFP_KERNEL);
	if (ib == NULL)
		return ERR_PTR(-ENOMEM);
	ib->size = size;
	ib->page_order = page_order;
	for (i = 0; i < nr_ptrs; i++) {
		if ((i & (nr_chunks - 1)) != 0) {
			/* Still inside the current chunk. */
			ib->data[i] = ib->data[i-1] + IDA_BLOCK_SIZE;
			continue;
		}
		/* Chunk boundary: allocate the next chunk. */
		ib->data[i] = (void *)
			__get_free_pages(GFP_KERNEL, page_order);
		if (ib->data[i] != NULL)
			continue;
		// Not enough memory
		/* Roll back: step i down chunk by chunk and free each
		 * chunk base allocated so far (the failed entry itself
		 * holds no memory). */
		while (i >= nr_chunks) {
			i -= nr_chunks;
			free_pages((unsigned long) ib->data[i],
				   ib->page_order);
		}
		kfree(ib);
		return ERR_PTR(-ENOMEM);
	}
	return ib;
}
/*
 * Free an idal buffer.
 * Every nr_chunks-th data pointer is the base of one allocation;
 * release each chunk and then the descriptor itself.
 */
static inline void
idal_buffer_free(struct idal_buffer *ib)
{
	int per_chunk, total, idx;

	per_chunk = (4096 << ib->page_order) >> IDA_SIZE_LOG;
	total = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
	for (idx = 0; idx < total; idx += per_chunk)
		free_pages((unsigned long) ib->data[idx], ib->page_order);
	kfree(ib);
}
/*
 * Test if an idal list is really needed for an idal buffer.
 * More than one chunk always needs indirect addressing; a single
 * chunk needs it only when it lies above 2 GB (64 bit only).
 */
static inline int
__idal_buffer_is_needed(struct idal_buffer *ib)
{
	if (ib->size > (4096ul << ib->page_order))
		return 1;
#ifdef __s390x__
	return idal_is_needed(ib->data[0], ib->size);
#else
	return 0;
#endif
}
/*
 * Set channel data address to idal buffer.
 * If the buffer needs indirect addressing the ccw is pointed at
 * the per-block pointer list and flagged with CCW_FLAG_IDA;
 * otherwise the single chunk is addressed directly.
 */
static inline void
idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
{
	if (!__idal_buffer_is_needed(ib)) {
		/* Direct addressing is sufficient. */
		ccw->cda = (u32)(addr_t) ib->data[0];
	} else {
		/* Let the channel walk the idal word list. */
		ccw->cda = (u32)(addr_t) ib->data;
		ccw->flags |= CCW_FLAG_IDA;
	}
	ccw->count = ib->size;
}
/*
 * Copy count bytes from an idal buffer to user memory.
 *
 * Copies block by block; each ib->data[i] is an IDA_BLOCK_SIZE
 * piece. Return semantics follow copy_to_user: 0 on success,
 * otherwise the number of bytes NOT copied -- on a partial copy
 * the residue of the failing block plus everything that was still
 * to come.
 *
 * count must not exceed ib->size (BUG_ON otherwise).
 */
static inline size_t
idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)
{
	size_t left;
	int i;
	BUG_ON(count > ib->size);
	for (i = 0; count > IDA_BLOCK_SIZE; i++) {
		left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE);
		if (left)
			/* residue of this block + remaining count */
			return left + count - IDA_BLOCK_SIZE;
		to = (void __user *) to + IDA_BLOCK_SIZE;
		count -= IDA_BLOCK_SIZE;
	}
	/* Final (possibly partial) block. */
	return copy_to_user(to, ib->data[i], count);
}
/*
 * Copy count bytes from user memory into an idal buffer.
 *
 * Copies block by block; each ib->data[i] is an IDA_BLOCK_SIZE
 * piece. Return semantics follow copy_from_user: 0 on success,
 * otherwise the number of bytes NOT copied -- on a partial copy
 * the residue of the failing block plus everything that was still
 * to come.
 *
 * count must not exceed ib->size (BUG_ON otherwise).
 */
static inline size_t
idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count)
{
	size_t left;
	int i;
	BUG_ON(count > ib->size);
	for (i = 0; count > IDA_BLOCK_SIZE; i++) {
		left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE);
		if (left)
			/* residue of this block + remaining count */
			return left + count - IDA_BLOCK_SIZE;
		from = (void __user *) from + IDA_BLOCK_SIZE;
		count -= IDA_BLOCK_SIZE;
	}
	/* Final (possibly partial) block. */
	return copy_from_user(ib->data[i], from, count);
}
#endif

View File

@@ -0,0 +1,112 @@
/*
* include/asm-s390/io.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/io.h"
*/
#ifndef _S390_IO_H
#define _S390_IO_H
#ifdef __KERNEL__
#include <linux/vmalloc.h>
#include <asm/page.h>
#define IO_SPACE_LIMIT 0xffffffff
#define __io_virt(x) ((void *)(PAGE_OFFSET | (unsigned long)(x)))
/*
* Change virtual addresses to physical addresses and vv.
* These are pretty trivial
*/
/*
 * Translate a kernel virtual address to a real (physical) address
 * with the LOAD REAL ADDRESS instruction (lra, or lrag on 64 bit).
 * The jz branch is taken when the translation succeeds (condition
 * code 0); on failure the result register is cleared, so an
 * untranslatable address yields 0.
 */
extern inline unsigned long virt_to_phys(volatile void * address)
{
	unsigned long real_address;
	__asm__ (
#ifndef __s390x__
		 "	lra	%0,0(%1)\n"
		 "	jz	0f\n"
		 "	sr	%0,%0\n"
#else /* __s390x__ */
		 "	lrag	%0,0(%1)\n"
		 "	jz	0f\n"
		 "	slgr	%0,%0\n"
#endif /* __s390x__ */
		 "0:"
		 : "=a" (real_address) : "a" (address) : "cc" );
	return real_address;
}
/*
 * Map a physical address back to a kernel virtual address by
 * or-ing PAGE_OFFSET into it (see __io_virt above).
 */
extern inline void * phys_to_virt(unsigned long address)
{
	return __io_virt(address);
}
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
/*
 * Remap an I/O memory range into the kernel address space.
 * Simply forwards to __ioremap with flags = 0.
 */
extern inline void * ioremap (unsigned long offset, unsigned long size)
{
	return __ioremap(offset, size, 0);
}
/*
* This one maps high address device memory and turns off caching for that area.
* it's useful if some control registers are in such an area and write combining
* or read caching is not desirable:
*/
extern inline void * ioremap_nocache (unsigned long offset, unsigned long size)
{
	/* Identical to ioremap(): the flags argument is 0 here as
	 * well, so no extra cache control happens on s390. */
	return __ioremap(offset, size, 0);
}
extern void iounmap(void *addr);
/*
* IO bus memory addresses are also 1:1 with the physical address
*/
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
/*
* readX/writeX() are used to access memory mapped devices. On some
* architectures the memory mapped IO stuff needs to be accessed
* differently.
*/
#define readb(addr) (*(volatile unsigned char *) __io_virt(addr))
#define readw(addr) (*(volatile unsigned short *) __io_virt(addr))
#define readl(addr) (*(volatile unsigned int *) __io_virt(addr))
#define readb_relaxed(addr) readb(addr)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)
#define writeb(b,addr) (*(volatile unsigned char *) __io_virt(addr) = (b))
#define writew(b,addr) (*(volatile unsigned short *) __io_virt(addr) = (b))
#define writel(b,addr) (*(volatile unsigned int *) __io_virt(addr) = (b))
#define memset_io(a,b,c) memset(__io_virt(a),(b),(c))
#define memcpy_fromio(a,b,c) memcpy((a),__io_virt(b),(c))
#define memcpy_toio(a,b,c) memcpy(__io_virt(a),(b),(c))
#define inb_p(addr) readb(addr)
#define inb(addr) readb(addr)
#define outb(x,addr) ((void) writeb(x,addr))
#define outb_p(x,addr) outb(x,addr)
#define mmiowb()
#endif /* __KERNEL__ */
#endif

View File

@@ -0,0 +1,88 @@
/*
* include/asm-s390/ioctl.h
*
* S390 version
*
* Derived from "include/asm-i386/ioctl.h"
*/
#ifndef _S390_IOCTL_H
#define _S390_IOCTL_H
/* ioctl command encoding: 32 bits total, command in lower 16 bits,
* size of the parameter structure in the lower 14 bits of the
* upper 16 bits.
* Encoding the size of the parameter structure in the ioctl request
* is useful for catching programs compiled with old versions
* and to avoid overwriting user space outside the user buffer area.
* The highest 2 bits are reserved for indicating the ``access mode''.
* NOTE: This limits the max parameter size to 16kB -1 !
*/
/*
* The following is for compatibility across the various Linux
* platforms. The i386 ioctl numbering scheme doesn't really enforce
* a type field. De facto, however, the top 8 bits of the lower 16
* bits are indeed used as a type field, so we might just as well make
* this explicit here. Please be sure to use the decoding macros
* below from now on.
*/
#define _IOC_NRBITS 8
#define _IOC_TYPEBITS 8
#define _IOC_SIZEBITS 14
#define _IOC_DIRBITS 2
#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
#define _IOC_NRSHIFT 0
#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
/*
* Direction bits.
*/
#define _IOC_NONE 0U
#define _IOC_WRITE 1U
#define _IOC_READ 2U
#define _IOC(dir,type,nr,size) \
(((dir) << _IOC_DIRSHIFT) | \
((type) << _IOC_TYPESHIFT) | \
((nr) << _IOC_NRSHIFT) | \
((size) << _IOC_SIZESHIFT))
/* provoke compile error for invalid uses of size argument */
extern unsigned long __invalid_size_argument_for_IOC;
#define _IOC_TYPECHECK(t) \
((sizeof(t) == sizeof(t[1]) && \
sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
sizeof(t) : __invalid_size_argument_for_IOC)
/* used to create numbers */
#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
#define _IOR_BAD(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
#define _IOW_BAD(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
#define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
/* used to decode ioctl numbers.. */
#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
/* ...and for the drivers/sound files... */
#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
#endif /* _S390_IOCTL_H */

View File

@@ -0,0 +1,88 @@
/*
* include/asm-s390/ioctls.h
*
* S390 version
*
* Derived from "include/asm-i386/ioctls.h"
*/
#ifndef __ARCH_S390_IOCTLS_H__
#define __ARCH_S390_IOCTLS_H__
#include <asm/ioctl.h>
/* 0x54 is just a magic number to make these relatively unique ('T') */
#define TCGETS 0x5401
#define TCSETS 0x5402
#define TCSETSW 0x5403
#define TCSETSF 0x5404
#define TCGETA 0x5405
#define TCSETA 0x5406
#define TCSETAW 0x5407
#define TCSETAF 0x5408
#define TCSBRK 0x5409
#define TCXONC 0x540A
#define TCFLSH 0x540B
#define TIOCEXCL 0x540C
#define TIOCNXCL 0x540D
#define TIOCSCTTY 0x540E
#define TIOCGPGRP 0x540F
#define TIOCSPGRP 0x5410
#define TIOCOUTQ 0x5411
#define TIOCSTI 0x5412
#define TIOCGWINSZ 0x5413
#define TIOCSWINSZ 0x5414
#define TIOCMGET 0x5415
#define TIOCMBIS 0x5416
#define TIOCMBIC 0x5417
#define TIOCMSET 0x5418
#define TIOCGSOFTCAR 0x5419
#define TIOCSSOFTCAR 0x541A
#define FIONREAD 0x541B
#define TIOCINQ FIONREAD
#define TIOCLINUX 0x541C
#define TIOCCONS 0x541D
#define TIOCGSERIAL 0x541E
#define TIOCSSERIAL 0x541F
#define TIOCPKT 0x5420
#define FIONBIO 0x5421
#define TIOCNOTTY 0x5422
#define TIOCSETD 0x5423
#define TIOCGETD 0x5424
#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
#define TIOCSBRK 0x5427 /* BSD compatibility */
#define TIOCCBRK 0x5428 /* BSD compatibility */
#define TIOCGSID 0x5429 /* Return the session ID of FD */
#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
#define FIOCLEX 0x5451
#define FIOASYNC 0x5452
#define TIOCSERCONFIG 0x5453
#define TIOCSERGWILD 0x5454
#define TIOCSERSWILD 0x5455
#define TIOCGLCKTRMIOS 0x5456
#define TIOCSLCKTRMIOS 0x5457
#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
#define TIOCSERGETLSR 0x5459 /* Get line status register */
#define TIOCSERGETMULTI 0x545A /* Get multiport config */
#define TIOCSERSETMULTI 0x545B /* Set multiport config */
#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
#define FIOQSIZE 0x545E
/* Used for packet mode */
#define TIOCPKT_DATA 0
#define TIOCPKT_FLUSHREAD 1
#define TIOCPKT_FLUSHWRITE 2
#define TIOCPKT_STOP 4
#define TIOCPKT_START 8
#define TIOCPKT_NOSTOP 16
#define TIOCPKT_DOSTOP 32
#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
#endif

View File

@@ -0,0 +1,40 @@
/*
* include/asm-s390/ipc.h
*
* S390 version
*
* Derived from "include/asm-i386/ipc.h"
*/
#ifndef __s390_IPC_H__
#define __s390_IPC_H__
/*
* These are used to wrap system calls on S390.
*
* See arch/s390/kernel/sys_s390.c for ugly details..
*/
struct ipc_kludge {
struct msgbuf __user *msgp;
long msgtyp;
};
#define SEMOP 1
#define SEMGET 2
#define SEMCTL 3
#define SEMTIMEDOP 4
#define MSGSND 11
#define MSGRCV 12
#define MSGGET 13
#define MSGCTL 14
#define SHMAT 21
#define SHMDT 22
#define SHMGET 23
#define SHMCTL 24
/* Used by the DIPC package, try and avoid reusing it */
#define DIPC 25
#define IPCCALL(version,op) ((version)<<16 | (op))
#endif

View File

@@ -0,0 +1,31 @@
#ifndef __S390_IPCBUF_H__
#define __S390_IPCBUF_H__
/*
* The user_ipc_perm structure for S/390 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 32-bit mode_t and seq
* - 2 miscellaneous 32-bit values
*/
struct ipc64_perm
{
__kernel_key_t key;
__kernel_uid32_t uid;
__kernel_gid32_t gid;
__kernel_uid32_t cuid;
__kernel_gid32_t cgid;
__kernel_mode_t mode;
unsigned short __pad1;
unsigned short seq;
#ifndef __s390x__
unsigned short __pad2;
#endif /* ! __s390x__ */
unsigned long __unused1;
unsigned long __unused2;
};
#endif /* __S390_IPCBUF_H__ */

View File

@@ -0,0 +1,30 @@
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H
#ifdef __KERNEL__
#include <linux/hardirq.h>
/*
* the definition of irqs has changed in 2.5.46:
* NR_IRQS is no longer the number of i/o
* interrupts (65536), but rather the number
* of interrupt classes (2).
* Only external and i/o interrupts make much sense here (CH).
*/
enum interruption_class {
EXTERNAL_INTERRUPT,
IO_INTERRUPT,
NR_IRQS,
};
#define touch_nmi_watchdog() do { } while(0)
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif /* __KERNEL__ */
#endif

View File

@@ -0,0 +1,23 @@
#ifdef __KERNEL__
#ifndef _ASM_KMAP_TYPES_H
#define _ASM_KMAP_TYPES_H
enum km_type {
KM_BOUNCE_READ,
KM_SKB_SUNRPC_DATA,
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
KM_BIO_SRC_IRQ,
KM_BIO_DST_IRQ,
KM_PTE0,
KM_PTE1,
KM_IRQ0,
KM_IRQ1,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_TYPE_NR
};
#endif
#endif /* __KERNEL__ */

View File

@@ -0,0 +1,6 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
/* Nothing to see here... */
#endif

View File

@@ -0,0 +1,59 @@
#ifndef _ASM_LOCAL_H
#define _ASM_LOCAL_H
#include <linux/config.h>
#include <linux/percpu.h>
#include <asm/atomic.h>
#ifndef __s390x__
typedef atomic_t local_t;
#define LOCAL_INIT(i) ATOMIC_INIT(i)
#define local_read(v) atomic_read(v)
#define local_set(v,i) atomic_set(v,i)
#define local_inc(v) atomic_inc(v)
#define local_dec(v) atomic_dec(v)
#define local_add(i, v) atomic_add(i, v)
#define local_sub(i, v) atomic_sub(i, v)
#else
typedef atomic64_t local_t;
#define LOCAL_INIT(i) ATOMIC64_INIT(i)
#define local_read(v) atomic64_read(v)
#define local_set(v,i) atomic64_set(v,i)
#define local_inc(v) atomic64_inc(v)
#define local_dec(v) atomic64_dec(v)
#define local_add(i, v) atomic64_add(i, v)
#define local_sub(i, v) atomic64_sub(i, v)
#endif
#define __local_inc(v) ((v)->counter++)
#define __local_dec(v) ((v)->counter--)
#define __local_add(i,v) ((v)->counter+=(i))
#define __local_sub(i,v) ((v)->counter-=(i))
/*
* Use these for per-cpu local_t variables: on some archs they are
* much more efficient than these naive implementations. Note they take
* a variable, not an address.
*/
#define cpu_local_read(v) local_read(&__get_cpu_var(v))
#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v))
#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v))
#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v))
#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v))
#endif /* _ASM_LOCAL_H */

View File

@@ -0,0 +1,320 @@
/*
* include/asm-s390/lowcore.h
*
* S390 version
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Hartmut Penner (hp@de.ibm.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
*/
#ifndef _ASM_S390_LOWCORE_H
#define _ASM_S390_LOWCORE_H
#ifndef __s390x__
#define __LC_EXT_OLD_PSW 0x018
#define __LC_SVC_OLD_PSW 0x020
#define __LC_PGM_OLD_PSW 0x028
#define __LC_MCK_OLD_PSW 0x030
#define __LC_IO_OLD_PSW 0x038
#define __LC_EXT_NEW_PSW 0x058
#define __LC_SVC_NEW_PSW 0x060
#define __LC_PGM_NEW_PSW 0x068
#define __LC_MCK_NEW_PSW 0x070
#define __LC_IO_NEW_PSW 0x078
#else /* !__s390x__ */
#define __LC_EXT_OLD_PSW 0x0130
#define __LC_SVC_OLD_PSW 0x0140
#define __LC_PGM_OLD_PSW 0x0150
#define __LC_MCK_OLD_PSW 0x0160
#define __LC_IO_OLD_PSW 0x0170
#define __LC_EXT_NEW_PSW 0x01b0
#define __LC_SVC_NEW_PSW 0x01c0
#define __LC_PGM_NEW_PSW 0x01d0
#define __LC_MCK_NEW_PSW 0x01e0
#define __LC_IO_NEW_PSW 0x01f0
#endif /* !__s390x__ */
#define __LC_EXT_PARAMS 0x080
#define __LC_CPU_ADDRESS 0x084
#define __LC_EXT_INT_CODE 0x086
#define __LC_SVC_ILC 0x088
#define __LC_SVC_INT_CODE 0x08A
#define __LC_PGM_ILC 0x08C
#define __LC_PGM_INT_CODE 0x08E
#define __LC_PER_ATMID 0x096
#define __LC_PER_ADDRESS 0x098
#define __LC_PER_ACCESS_ID 0x0A1
#define __LC_SUBCHANNEL_ID 0x0B8
#define __LC_SUBCHANNEL_NR 0x0BA
#define __LC_IO_INT_PARM 0x0BC
#define __LC_IO_INT_WORD 0x0C0
#define __LC_MCCK_CODE 0x0E8
#define __LC_RETURN_PSW 0x200
#define __LC_IRB 0x210
#define __LC_DIAG44_OPCODE 0x250
#define __LC_SAVE_AREA 0xC00
#ifndef __s390x__
#define __LC_KERNEL_STACK 0xC40
#define __LC_THREAD_INFO 0xC44
#define __LC_ASYNC_STACK 0xC48
#define __LC_KERNEL_ASCE 0xC4C
#define __LC_USER_ASCE 0xC50
#define __LC_PANIC_STACK 0xC54
#define __LC_CPUID 0xC60
#define __LC_CPUADDR 0xC68
#define __LC_IPLDEV 0xC7C
#define __LC_JIFFY_TIMER 0xC80
#define __LC_CURRENT 0xC90
#define __LC_INT_CLOCK 0xC98
#else /* __s390x__ */
#define __LC_KERNEL_STACK 0xD40
#define __LC_THREAD_INFO 0xD48
#define __LC_ASYNC_STACK 0xD50
#define __LC_KERNEL_ASCE 0xD58
#define __LC_USER_ASCE 0xD60
#define __LC_PANIC_STACK 0xD68
#define __LC_CPUID 0xD90
#define __LC_CPUADDR 0xD98
#define __LC_IPLDEV 0xDB8
#define __LC_JIFFY_TIMER 0xDC0
#define __LC_CURRENT 0xDD8
#define __LC_INT_CLOCK 0xDe8
#endif /* __s390x__ */
#define __LC_PANIC_MAGIC 0xE00
#ifndef __s390x__
#define __LC_PFAULT_INTPARM 0x080
#define __LC_AREGS_SAVE_AREA 0x120
#define __LC_CREGS_SAVE_AREA 0x1C0
#else /* __s390x__ */
#define __LC_PFAULT_INTPARM 0x11B8
#define __LC_AREGS_SAVE_AREA 0x1340
#define __LC_CREGS_SAVE_AREA 0x1380
#endif /* __s390x__ */
#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <asm/processor.h>
#include <linux/types.h>
#include <asm/sigp.h>
void restart_int_handler(void);
void ext_int_handler(void);
void system_call(void);
void pgm_check_handler(void);
void mcck_int_handler(void);
void io_int_handler(void);
struct _lowcore
{
#ifndef __s390x__
/* prefix area: defined by architecture */
psw_t restart_psw; /* 0x000 */
__u32 ccw2[4]; /* 0x008 */
psw_t external_old_psw; /* 0x018 */
psw_t svc_old_psw; /* 0x020 */
psw_t program_old_psw; /* 0x028 */
psw_t mcck_old_psw; /* 0x030 */
psw_t io_old_psw; /* 0x038 */
__u8 pad1[0x58-0x40]; /* 0x040 */
psw_t external_new_psw; /* 0x058 */
psw_t svc_new_psw; /* 0x060 */
psw_t program_new_psw; /* 0x068 */
psw_t mcck_new_psw; /* 0x070 */
psw_t io_new_psw; /* 0x078 */
__u32 ext_params; /* 0x080 */
__u16 cpu_addr; /* 0x084 */
__u16 ext_int_code; /* 0x086 */
__u16 svc_ilc; /* 0x088 */
__u16 svc_code; /* 0x08a */
__u16 pgm_ilc; /* 0x08c */
__u16 pgm_code; /* 0x08e */
__u32 trans_exc_code; /* 0x090 */
__u16 mon_class_num; /* 0x094 */
__u16 per_perc_atmid; /* 0x096 */
__u32 per_address; /* 0x098 */
__u32 monitor_code; /* 0x09c */
__u8 exc_access_id; /* 0x0a0 */
__u8 per_access_id; /* 0x0a1 */
__u8 pad2[0xB8-0xA2]; /* 0x0a2 */
__u16 subchannel_id; /* 0x0b8 */
__u16 subchannel_nr; /* 0x0ba */
__u32 io_int_parm; /* 0x0bc */
__u32 io_int_word; /* 0x0c0 */
__u8 pad3[0xD8-0xC4]; /* 0x0c4 */
__u32 cpu_timer_save_area[2]; /* 0x0d8 */
__u32 clock_comp_save_area[2]; /* 0x0e0 */
__u32 mcck_interruption_code[2]; /* 0x0e8 */
__u8 pad4[0xf4-0xf0]; /* 0x0f0 */
__u32 external_damage_code; /* 0x0f4 */
__u32 failing_storage_address; /* 0x0f8 */
__u8 pad5[0x100-0xfc]; /* 0x0fc */
__u32 st_status_fixed_logout[4];/* 0x100 */
__u8 pad6[0x120-0x110]; /* 0x110 */
__u32 access_regs_save_area[16];/* 0x120 */
__u32 floating_pt_save_area[8]; /* 0x160 */
__u32 gpregs_save_area[16]; /* 0x180 */
__u32 cregs_save_area[16]; /* 0x1c0 */
psw_t return_psw; /* 0x200 */
__u8 irb[64]; /* 0x208 */
__u8 pad8[0xc00-0x248]; /* 0x248 */
/* System info area */
__u32 save_area[16]; /* 0xc00 */
__u32 kernel_stack; /* 0xc40 */
__u32 thread_info; /* 0xc44 */
__u32 async_stack; /* 0xc48 */
__u32 kernel_asce; /* 0xc4c */
__u32 user_asce; /* 0xc50 */
__u32 panic_stack; /* 0xc54 */
__u8 pad10[0xc60-0xc58]; /* 0xc58 */
/* entry.S sensitive area start */
struct cpuinfo_S390 cpu_data; /* 0xc60 */
__u32 ipl_device; /* 0xc7c */
/* entry.S sensitive area end */
/* SMP info area: defined by DJB */
__u64 jiffy_timer; /* 0xc80 */
__u32 ext_call_fast; /* 0xc88 */
__u32 percpu_offset; /* 0xc8c */
__u32 current_task; /* 0xc90 */
__u32 softirq_pending; /* 0xc94 */
__u64 int_clock; /* 0xc98 */
__u8 pad11[0xe00-0xca0]; /* 0xca0 */
/* 0xe00 is used as indicator for dump tools */
/* whether the kernel died with panic() or not */
__u32 panic_magic; /* 0xe00 */
/* Align to the top 1k of prefix area */
__u8 pad12[0x1000-0xe04]; /* 0xe04 */
#else /* !__s390x__ */
/* prefix area: defined by architecture */
__u32 ccw1[2]; /* 0x000 */
__u32 ccw2[4]; /* 0x008 */
__u8 pad1[0x80-0x18]; /* 0x018 */
__u32 ext_params; /* 0x080 */
__u16 cpu_addr; /* 0x084 */
__u16 ext_int_code; /* 0x086 */
__u16 svc_ilc; /* 0x088 */
__u16 svc_code; /* 0x08a */
__u16 pgm_ilc; /* 0x08c */
__u16 pgm_code; /* 0x08e */
__u32 data_exc_code; /* 0x090 */
__u16 mon_class_num; /* 0x094 */
__u16 per_perc_atmid; /* 0x096 */
addr_t per_address; /* 0x098 */
__u8 exc_access_id; /* 0x0a0 */
__u8 per_access_id; /* 0x0a1 */
__u8 op_access_id; /* 0x0a2 */
__u8 ar_access_id; /* 0x0a3 */
__u8 pad2[0xA8-0xA4]; /* 0x0a4 */
addr_t trans_exc_code; /* 0x0A0 */
addr_t monitor_code; /* 0x09c */
__u16 subchannel_id; /* 0x0b8 */
__u16 subchannel_nr; /* 0x0ba */
__u32 io_int_parm; /* 0x0bc */
__u32 io_int_word; /* 0x0c0 */
__u8 pad3[0xc8-0xc4]; /* 0x0c4 */
__u32 stfl_fac_list; /* 0x0c8 */
__u8 pad4[0xe8-0xcc]; /* 0x0cc */
__u32 mcck_interruption_code[2]; /* 0x0e8 */
__u8 pad5[0xf4-0xf0]; /* 0x0f0 */
__u32 external_damage_code; /* 0x0f4 */
addr_t failing_storage_address; /* 0x0f8 */
__u8 pad6[0x120-0x100]; /* 0x100 */
psw_t restart_old_psw; /* 0x120 */
psw_t external_old_psw; /* 0x130 */
psw_t svc_old_psw; /* 0x140 */
psw_t program_old_psw; /* 0x150 */
psw_t mcck_old_psw; /* 0x160 */
psw_t io_old_psw; /* 0x170 */
__u8 pad7[0x1a0-0x180]; /* 0x180 */
psw_t restart_psw; /* 0x1a0 */
psw_t external_new_psw; /* 0x1b0 */
psw_t svc_new_psw; /* 0x1c0 */
psw_t program_new_psw; /* 0x1d0 */
psw_t mcck_new_psw; /* 0x1e0 */
psw_t io_new_psw; /* 0x1f0 */
psw_t return_psw; /* 0x200 */
__u8 irb[64]; /* 0x210 */
__u32 diag44_opcode; /* 0x250 */
__u8 pad8[0xc00-0x254]; /* 0x254 */
/* System info area */
__u64 save_area[16]; /* 0xc00 */
__u8 pad9[0xd40-0xc80]; /* 0xc80 */
__u64 kernel_stack; /* 0xd40 */
__u64 thread_info; /* 0xd48 */
__u64 async_stack; /* 0xd50 */
__u64 kernel_asce; /* 0xd58 */
__u64 user_asce; /* 0xd60 */
__u64 panic_stack; /* 0xd68 */
__u8 pad10[0xd80-0xd70]; /* 0xd70 */
/* entry.S sensitive area start */
struct cpuinfo_S390 cpu_data; /* 0xd80 */
__u32 ipl_device; /* 0xdb8 */
__u32 pad11; /* 0xdbc */
/* entry.S sensitive area end */
/* SMP info area: defined by DJB */
__u64 jiffy_timer; /* 0xdc0 */
__u64 ext_call_fast; /* 0xdc8 */
__u64 percpu_offset; /* 0xdd0 */
__u64 current_task; /* 0xdd8 */
__u64 softirq_pending; /* 0xde0 */
__u64 int_clock; /* 0xde8 */
__u8 pad12[0xe00-0xdf0]; /* 0xdf0 */
/* 0xe00 is used as indicator for dump tools */
/* whether the kernel died with panic() or not */
__u32 panic_magic; /* 0xe00 */
__u8 pad13[0x1200-0xe04]; /* 0xe04 */
/* System info area */
__u64 floating_pt_save_area[16]; /* 0x1200 */
__u64 gpregs_save_area[16]; /* 0x1280 */
__u32 st_status_fixed_logout[4]; /* 0x1300 */
__u8 pad14[0x1318-0x1310]; /* 0x1310 */
__u32 prefixreg_save_area; /* 0x1318 */
__u32 fpt_creg_save_area; /* 0x131c */
__u8 pad15[0x1324-0x1320]; /* 0x1320 */
__u32 tod_progreg_save_area; /* 0x1324 */
__u32 cpu_timer_save_area[2]; /* 0x1328 */
__u32 clock_comp_save_area[2]; /* 0x1330 */
__u8 pad16[0x1340-0x1338]; /* 0x1338 */
__u32 access_regs_save_area[16]; /* 0x1340 */
__u64 cregs_save_area[16]; /* 0x1380 */
/* align to the top of the prefix area */
__u8 pad17[0x2000-0x1400]; /* 0x1400 */
#endif /* !__s390x__ */
} __attribute__((packed)); /* End structure*/
#define S390_lowcore (*((struct _lowcore *) 0))
extern struct _lowcore *lowcore_ptr[];
/*
 * Load a new prefix register value with the SPX instruction, i.e.
 * relocate this cpu's lowcore to the page at `address`.
 * NOTE(review): SPX reads the operand from memory; callers presumably
 * pass a suitably aligned absolute address -- confirm against users.
 */
extern __inline__ void set_prefix(__u32 address)
{
	/* "memory" clobber: all lowcore accesses change meaning after SPX. */
	__asm__ __volatile__ ("spx %0" : : "m" (address) : "memory" );
}
#define __PANIC_MAGIC 0xDEADC0DE
#endif
#endif

View File

@@ -0,0 +1,29 @@
/*
* arch/s390/kernel/mathemu.h
* IEEE floating point emulation.
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#ifndef __MATHEMU__
#define __MATHEMU__
extern int math_emu_b3(__u8 *, struct pt_regs *);
extern int math_emu_ed(__u8 *, struct pt_regs *);
extern int math_emu_ldr(__u8 *);
extern int math_emu_ler(__u8 *);
extern int math_emu_std(__u8 *, struct pt_regs *);
extern int math_emu_ld(__u8 *, struct pt_regs *);
extern int math_emu_ste(__u8 *, struct pt_regs *);
extern int math_emu_le(__u8 *, struct pt_regs *);
extern int math_emu_lfpc(__u8 *, struct pt_regs *);
extern int math_emu_stfpc(__u8 *, struct pt_regs *);
extern int math_emu_srnm(__u8 *, struct pt_regs *);
#endif /* __MATHEMU__ */

View File

@@ -0,0 +1,51 @@
/*
* include/asm-s390/mman.h
*
* S390 version
*
* Derived from "include/asm-i386/mman.h"
*/
#ifndef __S390_MMAN_H__
#define __S390_MMAN_H__
#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */
#define PROT_SEM 0x8 /* page may be used for atomic ops */
#define PROT_NONE 0x0 /* page can not be accessed */
#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
#define MAP_SHARED 0x01 /* Share changes */
#define MAP_PRIVATE 0x02 /* Changes are private */
#define MAP_TYPE 0x0f /* Mask for type of mapping */
#define MAP_FIXED 0x10 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20 /* don't use a file */
#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
#define MAP_LOCKED 0x2000 /* pages are locked */
#define MAP_NORESERVE 0x4000 /* don't check for reservations */
#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
#define MS_ASYNC 1 /* sync memory asynchronously */
#define MS_INVALIDATE 2 /* invalidate the caches */
#define MS_SYNC 4 /* synchronous memory sync */
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
#define MADV_NORMAL 0x0 /* default page-in behavior */
#define MADV_RANDOM 0x1 /* page-in minimum required */
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
#define MAP_FILE 0
#endif /* __S390_MMAN_H__ */

View File

@@ -0,0 +1,7 @@
#ifndef __MMU_H
#define __MMU_H
/* Default "unsigned long" context */
typedef unsigned long mm_context_t;
#endif

View File

@@ -0,0 +1,54 @@
/*
* include/asm-s390/mmu_context.h
*
* S390 version
*
* Derived from "include/asm-i386/mmu_context.h"
*/
#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H
/*
 * get a new mmu context: S390 doesn't know about mmu contexts.
*/
#define init_new_context(tsk,mm) 0
#define destroy_context(mm) do { } while (0)
/*
 * Lazy-TLB hook; intentionally a no-op on s390 (nothing to defer).
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
                                  struct task_struct *tsk)
{
}
/*
 * Install the user address space of `next` on this cpu.
 *
 * The user ASCE is built from the physical address of next->pgd plus
 * the standard designation bits, cached in the lowcore, and loaded
 * into control register 13.  Nothing is reloaded when prev == next,
 * but the cpu is always marked in next's cpu_vm_mask so TLB flush
 * broadcasts reach it.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
	if (prev != next) {
#ifndef __s390x__
		/* 31 bit: segment table origin + std bits. */
		S390_lowcore.user_asce = (__pa(next->pgd)&PAGE_MASK) |
			(_SEGMENT_TABLE|USER_STD_MASK);
		/* Load home space page table origin. */
		asm volatile("lctl 13,13,%0"
			: : "m" (S390_lowcore.user_asce) );
#else /* __s390x__ */
		/* 64 bit: region table origin + std bits. */
		S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
			(_REGION_TABLE|USER_STD_MASK);
		/* Load home space page table origin. */
		asm volatile("lctlg 13,13,%0"
			: : "m" (S390_lowcore.user_asce) );
#endif /* __s390x__ */
	}
	cpu_set(smp_processor_id(), next->cpu_vm_mask);
}
#define deactivate_mm(tsk,mm) do { } while (0)
/*
 * Activate `next` as the current task's address space: perform the mm
 * switch and restore the task's address-space limit from thread state.
 */
extern inline void activate_mm(struct mm_struct *prev,
                               struct mm_struct *next)
{
        switch_mm(prev, next, current);
	set_fs(current->thread.mm_segment);
}
#endif

View File

@@ -0,0 +1,46 @@
#ifndef _ASM_S390_MODULE_H
#define _ASM_S390_MODULE_H
/*
* This file contains the s390 architecture specific module code.
*/
/*
 * Per-symbol got/plt bookkeeping for module relocation processing.
 */
struct mod_arch_syminfo
{
	unsigned long got_offset;	/* offset of this symbol's got slot */
	unsigned long plt_offset;	/* offset of this symbol's plt stub */
	int got_initialized;		/* got slot already filled in? */
	int plt_initialized;		/* plt stub already filled in? */
};
/*
 * s390-specific per-module state (embedded in struct module) holding
 * the module's got/plt layout and per-symbol relocation info.
 */
struct mod_arch_specific
{
	/* Starting offset of got in the module core memory. */
	unsigned long got_offset;
	/* Starting offset of plt in the module core memory. */
	unsigned long plt_offset;
	/* Size of the got. */
	unsigned long got_size;
	/* Size of the plt. */
	unsigned long plt_size;
	/* Number of symbols in syminfo. */
	int nsyms;
	/* Additional symbol information (got and plt offsets). */
	struct mod_arch_syminfo *syminfo;
};
#ifdef __s390x__
#define ElfW(x) Elf64_ ## x
#define ELFW(x) ELF64_ ## x
#else
#define ElfW(x) Elf32_ ## x
#define ELFW(x) ELF32_ ## x
#endif
#define Elf_Addr ElfW(Addr)
#define Elf_Rela ElfW(Rela)
#define Elf_Shdr ElfW(Shdr)
#define Elf_Sym ElfW(Sym)
#define Elf_Ehdr ElfW(Ehdr)
#define ELF_R_SYM ELFW(R_SYM)
#define ELF_R_TYPE ELFW(R_TYPE)
#endif /* _ASM_S390_MODULE_H */

View File

@@ -0,0 +1,37 @@
#ifndef _S390_MSGBUF_H
#define _S390_MSGBUF_H
/*
* The msqid64_ds structure for S/390 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 64-bit time_t to solve y2038 problem
* - 2 miscellaneous 32-bit values
*/
/* Kernel/user ABI structure -- field order and padding are fixed. */
struct msqid64_ds {
	struct ipc64_perm msg_perm;
	__kernel_time_t msg_stime;	/* last msgsnd time */
#ifndef __s390x__
	unsigned long __unused1;	/* pads 32-bit time_t slot to 64 bits */
#endif /* ! __s390x__ */
	__kernel_time_t msg_rtime;	/* last msgrcv time */
#ifndef __s390x__
	unsigned long __unused2;
#endif /* ! __s390x__ */
	__kernel_time_t msg_ctime;	/* last change time */
#ifndef __s390x__
	unsigned long __unused3;
#endif /* ! __s390x__ */
	unsigned long msg_cbytes;	/* current number of bytes on queue */
	unsigned long msg_qnum;		/* number of messages in queue */
	unsigned long msg_qbytes;	/* max number of bytes on queue */
	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
	__kernel_pid_t msg_lrpid;	/* last receive pid */
	unsigned long __unused4;	/* reserved for future use */
	unsigned long __unused5;
};
#endif /* _S390_MSGBUF_H */

View File

@@ -0,0 +1,21 @@
/*
* include/asm-s390/namei.h
*
* S390 version
*
* Derived from "include/asm-i386/namei.h"
*
* Included from linux/fs/namei.c
*/
#ifndef __S390_NAMEI_H
#define __S390_NAMEI_H
/* This dummy routine may be changed to something useful
* for /usr/gnemul/ emulation stuff.
* Look at asm-sparc/namei.h for details.
*/
#define __emul_prefix() NULL
#endif /* __S390_NAMEI_H */

View File

@@ -0,0 +1,205 @@
/*
* include/asm-s390/page.h
*
* S390 version
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Hartmut Penner (hp@de.ibm.com)
*/
#ifndef _S390_PAGE_H
#define _S390_PAGE_H
#include <asm/setup.h>
#include <asm/types.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#ifndef __s390x__
/*
 * Zero one 4K page with a single MVCL.  The even/odd register pair
 * holds destination address and length (4096); "slr 1,1" zeroes the
 * source length register so MVCL fills the page with the pad byte (0).
 */
static inline void clear_page(void *page)
{
	register_pair rp;

	rp.subreg.even = (unsigned long) page;
	rp.subreg.odd = (unsigned long) 4096;
	asm volatile (" slr 1,1\n"
		" mvcl %0,0"
		: "+&a" (rp) : : "memory", "cc", "1" );
}
/*
 * Copy one 4K page.  If the machine has the MVPG facility, a single
 * move-page instruction does it (r0 cleared to select default key/
 * access behaviour); otherwise fall back to 16 MVCs of 256 bytes,
 * MVC's maximum length, covering the full 4096 bytes.
 */
static inline void copy_page(void *to, void *from)
{
	if (MACHINE_HAS_MVPG)
		asm volatile (" sr 0,0\n"
			" mvpg %0,%1"
			: : "a" ((void *)(to)), "a" ((void *)(from))
			: "memory", "cc", "0" );
	else
		asm volatile (" mvc 0(256,%0),0(%1)\n"
			" mvc 256(256,%0),256(%1)\n"
			" mvc 512(256,%0),512(%1)\n"
			" mvc 768(256,%0),768(%1)\n"
			" mvc 1024(256,%0),1024(%1)\n"
			" mvc 1280(256,%0),1280(%1)\n"
			" mvc 1536(256,%0),1536(%1)\n"
			" mvc 1792(256,%0),1792(%1)\n"
			" mvc 2048(256,%0),2048(%1)\n"
			" mvc 2304(256,%0),2304(%1)\n"
			" mvc 2560(256,%0),2560(%1)\n"
			" mvc 2816(256,%0),2816(%1)\n"
			" mvc 3072(256,%0),3072(%1)\n"
			" mvc 3328(256,%0),3328(%1)\n"
			" mvc 3584(256,%0),3584(%1)\n"
			" mvc 3840(256,%0),3840(%1)\n"
			: : "a"((void *)(to)),"a"((void *)(from))
			: "memory" );
}
#else /* __s390x__ */
/*
 * 64 bit variant: zero one 4K page with MVCL using an explicit 2/3
 * register pair (address/length); r1 is cleared as the source length
 * so MVCL pads the destination with zero bytes.
 */
static inline void clear_page(void *page)
{
	asm volatile (" lgr 2,%0\n"
		" lghi 3,4096\n"
		" slgr 1,1\n"
		" mvcl 2,0"
		: : "a" ((void *) (page))
		: "memory", "cc", "1", "2", "3" );
}
/*
 * 64 bit variant of copy_page: prefer the MVPG instruction (r0
 * cleared for default behaviour), otherwise copy the page as 16
 * back-to-back 256-byte MVCs.
 */
static inline void copy_page(void *to, void *from)
{
	if (MACHINE_HAS_MVPG)
		asm volatile (" sgr 0,0\n"
			" mvpg %0,%1"
			: : "a" ((void *)(to)), "a" ((void *)(from))
			: "memory", "cc", "0" );
	else
		asm volatile (" mvc 0(256,%0),0(%1)\n"
			" mvc 256(256,%0),256(%1)\n"
			" mvc 512(256,%0),512(%1)\n"
			" mvc 768(256,%0),768(%1)\n"
			" mvc 1024(256,%0),1024(%1)\n"
			" mvc 1280(256,%0),1280(%1)\n"
			" mvc 1536(256,%0),1536(%1)\n"
			" mvc 1792(256,%0),1792(%1)\n"
			" mvc 2048(256,%0),2048(%1)\n"
			" mvc 2304(256,%0),2304(%1)\n"
			" mvc 2560(256,%0),2560(%1)\n"
			" mvc 2816(256,%0),2816(%1)\n"
			" mvc 3072(256,%0),3072(%1)\n"
			" mvc 3328(256,%0),3328(%1)\n"
			" mvc 3584(256,%0),3584(%1)\n"
			" mvc 3840(256,%0),3840(%1)\n"
			: : "a"((void *)(to)),"a"((void *)(from))
			: "memory" );
}
#endif /* __s390x__ */
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
/* Pure 2^n version of get_order */
/*
 * Pure 2^n version of get_order: return the smallest allocation order
 * (power of two of pages) that holds `size` bytes.
 */
extern __inline__ int get_order(unsigned long size)
{
	/* Bias by one and pre-divide by half a page, as the original does:
	 * get_order(0..4096) == 0, get_order(4097..8192) == 1, ... */
	unsigned long chunks = (size - 1) >> (PAGE_SHIFT - 1);
	int order = 0;

	for (chunks >>= 1; chunks != 0; chunks >>= 1)
		order++;
	return order;
}
/*
* These are used to make use of C type-checking..
*/
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x) ((x).pte)
#define pgprot_val(x) ((x).pgprot)
#ifndef __s390x__
typedef struct { unsigned long pmd; } pmd_t;
typedef struct {
unsigned long pgd0;
unsigned long pgd1;
unsigned long pgd2;
unsigned long pgd3;
} pgd_t;
#define pmd_val(x) ((x).pmd)
#define pgd_val(x) ((x).pgd0)
#else /* __s390x__ */
typedef struct {
unsigned long pmd0;
unsigned long pmd1;
} pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pmd_val(x) ((x).pmd0)
#define pmd_val1(x) ((x).pmd1)
#define pgd_val(x) ((x).pgd)
#endif /* __s390x__ */
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
/* default storage key used for all pages */
extern unsigned int default_storage_key;
/*
 * Set the storage key of the 4K frame at `addr` with the SSKE
 * instruction; `skey` supplies the key byte (access-control bits,
 * fetch-protection, reference and change bits).
 */
static inline void
page_set_storage_key(unsigned long addr, unsigned int skey)
{
	asm volatile ( "sske %0,%1" : : "d" (skey), "a" (addr) );
}
/*
 * Read the storage key of the 4K frame at `addr` with the ISKE
 * instruction.  The "0" (0) input pre-clears the result register so
 * only the key byte inserted by ISKE remains.
 */
static inline unsigned int
page_get_storage_key(unsigned long addr)
{
	unsigned int skey;

	asm volatile ( "iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0) );
	return skey;
}
#endif /* !__ASSEMBLY__ */
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
#define __PAGE_OFFSET 0x0UL
#define PAGE_OFFSET 0x0UL
#define __pa(x) (unsigned long)(x)
#define __va(x) (void *)(unsigned long)(x)
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#endif /* __KERNEL__ */
#endif /* _S390_PAGE_H */

View File

@@ -0,0 +1,30 @@
/*
* include/asm-s390/param.h
*
* S390 version
*
* Derived from "include/asm-i386/param.h"
*/
#ifndef _ASMS390_PARAM_H
#define _ASMS390_PARAM_H
#ifdef __KERNEL__
# define HZ 100 /* Internal kernel timer frequency */
# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
#endif
#ifndef HZ
#define HZ 100
#endif
#define EXEC_PAGESIZE 4096
#ifndef NOGROUP
#define NOGROUP (-1)
#endif
#define MAXHOSTNAMELEN 64 /* max length of hostname */
#endif

View File

@@ -0,0 +1,10 @@
#ifndef __ASM_S390_PCI_H
#define __ASM_S390_PCI_H
/* S/390 systems don't have a PCI bus. This file is just here because some stupid .c code
* includes it even if CONFIG_PCI is not set.
*/
#define PCI_DMA_BUS_IS_PHYS (0)
#endif /* __ASM_S390_PCI_H */

View File

@@ -0,0 +1,70 @@
#ifndef __ARCH_S390_PERCPU__
#define __ARCH_S390_PERCPU__
#include <linux/compiler.h>
#include <asm/lowcore.h>
#define __GENERIC_PER_CPU
/*
* s390 uses its own implementation for per cpu data, the offset of
* the cpu local data area is cached in the cpu's lowcore memory.
* For 64 bit module code s390 forces the use of a GOT slot for the
* address of the per cpu variable. This is needed because the module
* may be more than 4G above the per cpu area.
*/
#if defined(__s390x__) && defined(MODULE)
#define __reloc_hide(var,offset) \
(*({ unsigned long *__ptr; \
asm ( "larl %0,per_cpu__"#var"@GOTENT" \
: "=a" (__ptr) : "X" (per_cpu__##var) ); \
(typeof(&per_cpu__##var))((*__ptr) + (offset)); }))
#else
#define __reloc_hide(var, offset) \
(*({ unsigned long __ptr; \
asm ( "" : "=a" (__ptr) : "0" (&per_cpu__##var) ); \
(typeof(&per_cpu__##var)) (__ptr + (offset)); }))
#endif
#ifdef CONFIG_SMP
extern unsigned long __per_cpu_offset[NR_CPUS];
/* Separate out the type, so (int[3], foo) works. */
#define DEFINE_PER_CPU(type, name) \
__attribute__((__section__(".data.percpu"))) \
__typeof__(type) per_cpu__##name
#define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
#define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu])
/* A macro to avoid #include hell... */
#define percpu_modcopy(pcpudst, src, size) \
do { \
unsigned int __i; \
for (__i = 0; __i < NR_CPUS; __i++) \
if (cpu_possible(__i)) \
memcpy((pcpudst)+__per_cpu_offset[__i], \
(src), (size)); \
} while (0)
#else /* ! SMP */
#define DEFINE_PER_CPU(type, name) \
__typeof__(type) per_cpu__##name
#define __get_cpu_var(var) __reloc_hide(var,0)
#define per_cpu(var,cpu) __reloc_hide(var,0)
#endif /* SMP */
#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
#endif /* __ARCH_S390_PERCPU__ */

View File

@@ -0,0 +1,167 @@
/*
* include/asm-s390/pgalloc.h
*
* S390 version
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Hartmut Penner (hp@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/pgalloc.h"
* Copyright (C) 1994 Linus Torvalds
*/
#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H
#include <linux/config.h>
#include <asm/processor.h>
#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#define check_pgt_cache() do {} while (0)
extern void diag10(unsigned long addr);
/*
* Allocate and free page tables. The xxx_kernel() versions are
* used to allocate a kernel page table - this turns on ASN bits
* if any.
*/
/*
 * Allocate and initialize a page directory; returns NULL on failure.
 *
 * 31 bit: an order-1 (8K) allocation; the pmd level is folded into the
 * pgd, so each user entry is cleared via pmd_clear() on the folded pmd.
 * 64 bit: an order-2 (16K) region-third table, entries invalidated
 * with pgd_clear().
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	int i;

#ifndef __s390x__
	pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,1);
	if (pgd != NULL)
		for (i = 0; i < USER_PTRS_PER_PGD; i++)
			pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
#else /* __s390x__ */
	pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,2);
	if (pgd != NULL)
		for (i = 0; i < PTRS_PER_PGD; i++)
			pgd_clear(pgd + i);
#endif /* __s390x__ */
	return pgd;
}
/*
 * Free a page directory from pgd_alloc(); the order must match the
 * allocation (1 on 31 bit, 2 on 64 bit).
 */
static inline void pgd_free(pgd_t *pgd)
{
#ifndef __s390x__
	free_pages((unsigned long) pgd, 1);
#else /* __s390x__ */
	free_pages((unsigned long) pgd, 2);
#endif /* __s390x__ */
}
#ifndef __s390x__
/*
* page middle directory allocation/free routines.
* We use pmd cache only on s390x, so these are dummy routines. This
* code never triggers because the pgd will always be present.
*/
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x) do { } while (0)
#define __pmd_free_tlb(tlb,x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
#else /* __s390x__ */
static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
pmd_t *pmd;
int i;
pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, 2);
if (pmd != NULL) {
for (i=0; i < PTRS_PER_PMD; i++)
pmd_clear(pmd+i);
}
return pmd;
}
/* Release the order-2 segment table obtained from pmd_alloc_one(). */
static inline void pmd_free (pmd_t *pmd)
{
	free_pages((unsigned long) pmd, 2);
}
#define __pmd_free_tlb(tlb,pmd) \
do { \
tlb_flush_mmu(tlb, 0, 0); \
pmd_free(pmd); \
} while (0)
/*
 * Make the region-third-table entry *pgd point at the segment table
 * `pmd` (physical address plus the valid-entry designation bits).
 */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
}
#endif /* __s390x__ */
/*
 * Wire a page table into the segment level.
 *
 * 31 bit: a pgd/pmd slot covers 4M, i.e. four hardware page tables of
 * 256 entries each, so four consecutive entries are filled pointing at
 * pte, pte+256, pte+512 and pte+768.
 * 64 bit: a pmd_t holds two entries (two 256-entry tables).
 */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifndef __s390x__
	pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
	pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
	pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
	pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
#else /* __s390x__ */
	pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
	pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
#endif /* __s390x__ */
}
/*
 * struct page flavour of pmd_populate_kernel: recover the page table
 * address from the page's pfn.  Works because __pa(x) == (x) on s390
 * (see page.h), so the physical address doubles as the pte pointer.
 */
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
	pmd_populate_kernel(mm, pmd, (pte_t *)((page-mem_map) << PAGE_SHIFT));
}
/*
* page table entry allocation/free routines.
*/
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
{
pte_t *pte;
int i;
pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (pte != NULL) {
for (i=0; i < PTRS_PER_PTE; i++)
pte_clear(pte+i);
}
return pte;
}
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);
if (pte)
return virt_to_page(pte);
return 0;
}
/* Free a page table allocated with pte_alloc_one_kernel(). */
static inline void pte_free_kernel(pte_t *pte)
{
	free_page((unsigned long) pte);
}
/* Free a page table given as the struct page from pte_alloc_one(). */
static inline void pte_free(struct page *pte)
{
	__free_page(pte);
}
#define __pte_free_tlb(tlb,pte) tlb_remove_page(tlb,pte)
/*
* This establishes kernel virtual mappings (e.g., as a result of a
* vmalloc call). Since s390-esame uses a separate kernel page table,
* there is nothing to do here... :)
*/
#define set_pgdir(addr,entry) do { } while(0)
#endif /* _S390_PGALLOC_H */

View File

@@ -0,0 +1,802 @@
/*
* include/asm-s390/pgtable.h
*
* S390 version
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Hartmut Penner (hp@de.ibm.com)
* Ulrich Weigand (weigand@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/pgtable.h"
*/
#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H
/*
* The Linux memory management assumes a three-level page table setup. For
* s390 31 bit we "fold" the mid level into the top-level page table, so
* that we physically have the same two-level page table as the s390 mmu
* expects in 31 bit mode. For s390 64 bit we use three of the five levels
* the hardware provides (region first and region second tables are not
* used).
*
* The "pgd_xxx()" functions are trivial for a folded two-level
* setup: the pgd is never bad, and a pmd always exists (as it's folded
* into the pgd entry)
*
* This file contains the functions and defines necessary to modify and use
* the S390 page table tree.
*/
#ifndef __ASSEMBLY__
#include <asm/bug.h>
#include <asm/processor.h>
#include <linux/threads.h>
struct vm_area_struct; /* forward declaration (include/linux/mm.h) */
extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
/*
* The S390 doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
*/
#define update_mmu_cache(vma, address, pte) do { } while (0)
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */
/*
* PMD_SHIFT determines the size of the area a second-level page
* table can map
* PGDIR_SHIFT determines what a third-level page table entry can map
*/
#ifndef __s390x__
# define PMD_SHIFT 22
# define PGDIR_SHIFT 22
#else /* __s390x__ */
# define PMD_SHIFT 21
# define PGDIR_SHIFT 31
#endif /* __s390x__ */
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
/*
* entries per page directory level: the S390 is two-level, so
* we don't really have any PMD directory physically.
* for S390 segment-table entries are combined to one PGD
* that leads to 1024 pte per pgd
*/
#ifndef __s390x__
# define PTRS_PER_PTE 1024
# define PTRS_PER_PMD 1
# define PTRS_PER_PGD 512
#else /* __s390x__ */
# define PTRS_PER_PTE 512
# define PTRS_PER_PMD 1024
# define PTRS_PER_PGD 2048
#endif /* __s390x__ */
/*
* pgd entries used up by user/kernel:
*/
#ifndef __s390x__
# define USER_PTRS_PER_PGD 512
# define USER_PGD_PTRS 512
# define KERNEL_PGD_PTRS 512
# define FIRST_USER_PGD_NR 0
#else /* __s390x__ */
# define USER_PTRS_PER_PGD 2048
# define USER_PGD_PTRS 2048
# define KERNEL_PGD_PTRS 2048
# define FIRST_USER_PGD_NR 0
#endif /* __s390x__ */
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
#ifndef __ASSEMBLY__
/*
* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 8MB value just means that there will be a 8MB "hole" after the
* physical memory until the kernel virtual memory starts. That means that
* any out-of-bounds memory accesses will hopefully be caught.
* The vmalloc() routines leaves a hole of 4kB between each vmalloced
* area for the same reason. ;)
*/
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) \
& ~(VMALLOC_OFFSET-1))
#ifndef __s390x__
# define VMALLOC_END (0x7fffffffL)
#else /* __s390x__ */
# define VMALLOC_END (0x40000000000L)
#endif /* __s390x__ */
/*
* A 31 bit pagetable entry of S390 has following format:
* | PFRA | | OS |
* 0 0IP0
* 00000000001111111111222222222233
* 01234567890123456789012345678901
*
* I Page-Invalid Bit: Page is not available for address-translation
* P Page-Protection Bit: Store access not possible for page
*
* A 31 bit segmenttable entry of S390 has following format:
* | P-table origin | |PTL
* 0 IC
* 00000000001111111111222222222233
* 01234567890123456789012345678901
*
* I Segment-Invalid Bit: Segment is not available for address-translation
* C Common-Segment Bit: Segment is not private (PoP 3-30)
* PTL Page-Table-Length: Page-table length (PTL+1*16 entries -> up to 256)
*
* The 31 bit segmenttable origin of S390 has following format:
*
* |S-table origin | | STL |
* X **GPS
* 00000000001111111111222222222233
* 01234567890123456789012345678901
*
* X Space-Switch event:
* G Segment-Invalid Bit: *
* P Private-Space Bit: Segment is not private (PoP 3-30)
* S Storage-Alteration:
* STL Segment-Table-Length: Segment-table length (STL+1*16 entries -> up to 2048)
*
* A 64 bit pagetable entry of S390 has following format:
* | PFRA |0IP0| OS |
* 0000000000111111111122222222223333333333444444444455555555556666
* 0123456789012345678901234567890123456789012345678901234567890123
*
* I Page-Invalid Bit: Page is not available for address-translation
* P Page-Protection Bit: Store access not possible for page
*
* A 64 bit segmenttable entry of S390 has following format:
* | P-table origin | TT
* 0000000000111111111122222222223333333333444444444455555555556666
* 0123456789012345678901234567890123456789012345678901234567890123
*
* I Segment-Invalid Bit: Segment is not available for address-translation
* C Common-Segment Bit: Segment is not private (PoP 3-30)
* P Page-Protection Bit: Store access not possible for page
* TT Type 00
*
* A 64 bit region table entry of S390 has following format:
* | S-table origin | TF TTTL
* 0000000000111111111122222222223333333333444444444455555555556666
* 0123456789012345678901234567890123456789012345678901234567890123
*
* I Segment-Invalid Bit: Segment is not available for address-translation
* TT Type 01
* TF
 * TL Table length
*
* The 64 bit regiontable origin of S390 has following format:
 * | region table origin | DTTL
* 0000000000111111111122222222223333333333444444444455555555556666
* 0123456789012345678901234567890123456789012345678901234567890123
*
* X Space-Switch event:
* G Segment-Invalid Bit:
* P Private-Space Bit:
* S Storage-Alteration:
* R Real space
* TL Table-Length:
*
* A storage key has the following format:
* | ACC |F|R|C|0|
* 0 3 4 5 6 7
* ACC: access key
* F : fetch protection bit
* R : referenced bit
* C : changed bit
*/
/* Hardware bits in the page table entry */
#define _PAGE_RO 0x200 /* HW read-only */
#define _PAGE_INVALID 0x400 /* HW invalid */
/* Mask and four different kinds of invalid pages. */
#define _PAGE_INVALID_MASK 0x601
#define _PAGE_INVALID_EMPTY 0x400
#define _PAGE_INVALID_NONE 0x401
#define _PAGE_INVALID_SWAP 0x600
#define _PAGE_INVALID_FILE 0x601
#ifndef __s390x__
/* Bits in the segment table entry */
#define _PAGE_TABLE_LEN 0xf /* only full page-tables */
#define _PAGE_TABLE_COM 0x10 /* common page-table */
#define _PAGE_TABLE_INV 0x20 /* invalid page-table */
#define _SEG_PRESENT 0x001 /* Software (overlap with PTL) */
/* Bits int the storage key */
#define _PAGE_CHANGED 0x02 /* HW changed bit */
#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
#define _USER_SEG_TABLE_LEN 0x7f /* user-segment-table up to 2 GB */
#define _KERNEL_SEG_TABLE_LEN 0x7f /* kernel-segment-table up to 2 GB */
/*
* User and Kernel pagetables are identical
*/
#define _PAGE_TABLE _PAGE_TABLE_LEN
#define _KERNPG_TABLE _PAGE_TABLE_LEN
/*
* The Kernel segment-tables includes the User segment-table
*/
#define _SEGMENT_TABLE (_USER_SEG_TABLE_LEN|0x80000000|0x100)
#define _KERNSEG_TABLE _KERNEL_SEG_TABLE_LEN
#define USER_STD_MASK 0x00000080UL
#else /* __s390x__ */
/* Bits in the segment table entry */
#define _PMD_ENTRY_INV 0x20 /* invalid segment table entry */
#define _PMD_ENTRY 0x00
/* Bits in the region third table entry */
#define _PGD_ENTRY_INV 0x20 /* invalid region table entry */
#define _PGD_ENTRY 0x07
/*
* User and kernel page directory
*/
#define _REGION_THIRD 0x4
#define _REGION_THIRD_LEN 0x3
#define _REGION_TABLE (_REGION_THIRD|_REGION_THIRD_LEN|0x40|0x100)
#define _KERN_REGION_TABLE (_REGION_THIRD|_REGION_THIRD_LEN)
#define USER_STD_MASK 0x0000000000000080UL
/* Bits in the storage key */
#define _PAGE_CHANGED 0x02 /* HW changed bit */
#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
#endif /* __s390x__ */
/*
* No mapping available
*/
#define PAGE_NONE_SHARED __pgprot(_PAGE_INVALID_NONE)
#define PAGE_NONE_PRIVATE __pgprot(_PAGE_INVALID_NONE)
#define PAGE_RO_SHARED __pgprot(_PAGE_RO)
#define PAGE_RO_PRIVATE __pgprot(_PAGE_RO)
#define PAGE_COPY __pgprot(_PAGE_RO)
#define PAGE_SHARED __pgprot(0)
#define PAGE_KERNEL __pgprot(0)
/*
* The S390 can't do page protection for execute, and considers that the
* same are read. Also, write permissions imply read permissions. This is
* the closest we can get..
*/
/*xwr*/
#define __P000 PAGE_NONE_PRIVATE
#define __P001 PAGE_RO_PRIVATE
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_RO_PRIVATE
#define __P101 PAGE_RO_PRIVATE
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY
#define __S000 PAGE_NONE_SHARED
#define __S001 PAGE_RO_SHARED
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_RO_SHARED
#define __S101 PAGE_RO_SHARED
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
/*
* Certain architectures need to do special things when PTEs
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
/*
 * Store a pte; a plain assignment suffices on s390, no extra
 * hardware notification is needed (see file comment above).
 */
extern inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	*pteptr = pteval;
}
/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__
/* 31 bit: the pgd level is folded away, so it always reports present/ok. */
extern inline int pgd_present(pgd_t pgd) { return 1; }
extern inline int pgd_none(pgd_t pgd) { return 0; }
extern inline int pgd_bad(pgd_t pgd) { return 0; }
/* Present if _SEG_PRESENT is set (presumably a software bit — defined
 * elsewhere in this header; confirm against the 31-bit pte layout). */
extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _SEG_PRESENT; }
/* Empty if the segment-table invalid bit is set. */
extern inline int pmd_none(pmd_t pmd) { return pmd_val(pmd) & _PAGE_TABLE_INV; }
extern inline int pmd_bad(pmd_t pmd)
{
/* Everything outside the page-table origin, ignoring the invalid bit,
 * must match the _PAGE_TABLE template; otherwise the entry is corrupt. */
return (pmd_val(pmd) & (~PAGE_MASK & ~_PAGE_TABLE_INV)) != _PAGE_TABLE;
}
#else /* __s390x__ */
/* 64 bit: a pgd (region-third-table entry) is present when its low
 * type bits match the _PGD_ENTRY template. */
extern inline int pgd_present(pgd_t pgd)
{
return (pgd_val(pgd) & ~PAGE_MASK) == _PGD_ENTRY;
}
extern inline int pgd_none(pgd_t pgd)
{
return pgd_val(pgd) & _PGD_ENTRY_INV;
}
extern inline int pgd_bad(pgd_t pgd)
{
/* Low bits (minus the invalid bit) must equal the entry template. */
return (pgd_val(pgd) & (~PAGE_MASK & ~_PGD_ENTRY_INV)) != _PGD_ENTRY;
}
/* Same three checks, one level down, for segment-table (pmd) entries. */
extern inline int pmd_present(pmd_t pmd)
{
return (pmd_val(pmd) & ~PAGE_MASK) == _PMD_ENTRY;
}
extern inline int pmd_none(pmd_t pmd)
{
return pmd_val(pmd) & _PMD_ENTRY_INV;
}
extern inline int pmd_bad(pmd_t pmd)
{
return (pmd_val(pmd) & (~PAGE_MASK & ~_PMD_ENTRY_INV)) != _PMD_ENTRY;
}
#endif /* __s390x__ */
/* Empty slot: invalid bit set with the "empty" sub-type. */
extern inline int pte_none(pte_t pte)
{
return (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_EMPTY;
}
/* Present: either a valid pte, or an invalid pte of the PROT_NONE
 * flavour (_PAGE_INVALID_NONE), which still counts as mapped. */
extern inline int pte_present(pte_t pte)
{
return !(pte_val(pte) & _PAGE_INVALID) ||
(pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_NONE;
}
/* Non-linear file pte: invalid bit set with the "file" sub-type. */
extern inline int pte_file(pte_t pte)
{
return (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_FILE;
}
#define pte_same(a,b) (pte_val(a) == pte_val(b))
/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
/* Writable iff the hardware read-only protection bit is clear. */
extern inline int pte_write(pte_t pte)
{
return (pte_val(pte) & _PAGE_RO) == 0;
}
extern inline int pte_dirty(pte_t pte)
{
/* A pte is neither clean nor dirty on s/390. The dirty bit
 * is in the storage key. See page_test_and_clear_dirty for
 * details.
 */
return 0;
}
extern inline int pte_young(pte_t pte)
{
/* A pte is neither young nor old on s/390. The young bit
 * is in the storage key. See page_test_and_clear_young for
 * details.
 */
return 0;
}
/*
 * pgd/pmd/pte modification functions
 */
#ifndef __s390x__
/* 31 bit: the pgd level is folded away, nothing to clear. */
extern inline void pgd_clear(pgd_t * pgdp) { }
extern inline void pmd_clear(pmd_t * pmdp)
{
/* One Linux pmd entry covers four consecutive 1 MB hardware
 * segment-table entries (4 MB segments are emulated — see the
 * comment in ptep_clear_flush), so invalidate all four. */
pmd_val(pmdp[0]) = _PAGE_TABLE_INV;
pmd_val(pmdp[1]) = _PAGE_TABLE_INV;
pmd_val(pmdp[2]) = _PAGE_TABLE_INV;
pmd_val(pmdp[3]) = _PAGE_TABLE_INV;
}
#else /* __s390x__ */
extern inline void pgd_clear(pgd_t * pgdp)
{
/* Mark the region-third-table entry invalid, keep the type bits. */
pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY;
}
extern inline void pmd_clear(pmd_t * pmdp)
{
/* A pmd holds two segment-table entries (pmd_val/pmd_val1);
 * invalidate both. */
pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
}
#endif /* __s390x__ */
/* Empty a pte slot: an empty pte is marked invalid, not all-zero. */
extern inline void pte_clear(pte_t *ptep)
{
pte_val(*ptep) = _PAGE_INVALID_EMPTY;
}
/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
/* Keep the page-frame address, replace all protection bits. */
pte_val(pte) &= PAGE_MASK;
pte_val(pte) |= pgprot_val(newprot);
return pte;
}
extern inline pte_t pte_wrprotect(pte_t pte)
{
/* Do not clobber _PAGE_INVALID_NONE pages! */
if (!(pte_val(pte) & _PAGE_INVALID))
pte_val(pte) |= _PAGE_RO;
return pte;
}
extern inline pte_t pte_mkwrite(pte_t pte)
{
/* Allow writes by clearing the hardware read-only bit. */
pte_val(pte) &= ~_PAGE_RO;
return pte;
}
/* The four functions below are deliberate no-ops: s/390 keeps the
 * dirty and referenced state in the storage key, not in the pte. */
extern inline pte_t pte_mkclean(pte_t pte)
{
/* The only user of pte_mkclean is the fork() code.
We must *not* clear the *physical* page dirty bit
just because fork() wants to clear the dirty bit in
*one* of the page's mappings. So we just do nothing. */
return pte;
}
extern inline pte_t pte_mkdirty(pte_t pte)
{
/* We do not explicitly set the dirty bit because the
 * sske instruction is slow. It is faster to let the
 * next instruction set the dirty bit.
 */
return pte;
}
extern inline pte_t pte_mkold(pte_t pte)
{
/* S/390 doesn't keep its dirty/referenced bit in the pte.
 * There is no point in clearing the real referenced bit.
 */
return pte;
}
extern inline pte_t pte_mkyoung(pte_t pte)
{
/* S/390 doesn't keep its dirty/referenced bit in the pte.
 * There is no point in setting the real referenced bit.
 */
return pte;
}
/* Referenced state lives in the storage key, not the pte, so there is
 * nothing to test or clear here (see page_test_and_clear_young). */
static inline int ptep_test_and_clear_young(pte_t *ptep)
{
return 0;
}
static inline int
ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
/* No need to flush TLB; bits are in storage key */
return ptep_test_and_clear_young(ptep);
}
/* Dirty state also lives in the storage key (see
 * page_test_and_clear_dirty); nothing to do at pte level. */
static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
return 0;
}
static inline int
ptep_clear_flush_dirty(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
/* No need to flush TLB; bits are in storage key */
return ptep_test_and_clear_dirty(ptep);
}
/*
 * Fetch the current pte value and leave the slot empty (invalidated).
 * Returns the previous pte; no TLB flush is performed here — use
 * ptep_clear_flush when the mapping may still be cached in the TLB.
 */
static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	pte_t old = *ptep;

	pte_clear(ptep);
	return old;
}
/*
 * Read the old pte, invalidate the page-table entry and flush the
 * corresponding TLB entry with ipte ("invalidate page table entry").
 * ipte must only be applied to valid ptes, hence the _PAGE_INVALID
 * guards. Returns the pte value as it was before clearing.
 */
static inline pte_t
ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
pte_t pte = *ptep;
#ifndef __s390x__
if (!(pte_val(pte) & _PAGE_INVALID)) {
/* S390 has 1mb segments, we are emulating 4MB segments */
/* Mask the pte pointer down to the start of the hardware
 * page table the entry belongs to, as ipte requires. */
pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
__asm__ __volatile__ ("ipte %2,%3"
: "=m" (*ptep) : "m" (*ptep),
"a" (pto), "a" (address) );
}
#else /* __s390x__ */
if (!(pte_val(pte) & _PAGE_INVALID))
__asm__ __volatile__ ("ipte %2,%3"
: "=m" (*ptep) : "m" (*ptep),
"a" (ptep), "a" (address) );
#endif /* __s390x__ */
/* Normalize the slot to the canonical "empty" encoding. */
pte_clear(ptep);
return pte;
}
/* Rewrite the pte with the hardware read-only bit set (pte_wrprotect
 * leaves _PAGE_INVALID_NONE ptes untouched). */
static inline void ptep_set_wrprotect(pte_t *ptep)
{
pte_t old_pte = *ptep;
set_pte(ptep, pte_wrprotect(old_pte));
}
static inline void ptep_mkdirty(pte_t *ptep)
{
/* pte_mkdirty is a no-op on s390 (the dirty bit lives in the
 * storage key), so nothing is written back here on purpose. */
pte_mkdirty(*ptep);
}
/* Replace a pte: invalidate and flush the old mapping, then install
 * the new entry. */
static inline void
ptep_establish(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry)
{
ptep_clear_flush(vma, address, ptep);
set_pte(ptep, entry);
}
/* Access-flag updates reduce to re-establishing the pte; the __dirty
 * argument is intentionally ignored (dirty state is in the storage key). */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
ptep_establish(__vma, __address, __ptep, __entry)
/*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
 * race against modification of the referenced bit. This function
 * should therefore only be called if it is not mapped in any
 * address space.
 */
#define page_test_and_clear_dirty(_page) \
({ \
struct page *__page = (_page); \
unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \
int __skey = page_get_storage_key(__physpage); \
if (__skey & _PAGE_CHANGED) \
page_set_storage_key(__physpage, __skey & ~_PAGE_CHANGED);\
(__skey & _PAGE_CHANGED); \
})
/*
 * Test and clear referenced bit in storage key.
 * rrbe resets the reference bit and reports its old state in the
 * condition code; ipm/srl extract the cc, and (__ccode & 2) is
 * nonzero when the page had been referenced.
 */
#define page_test_and_clear_young(page) \
({ \
struct page *__page = (page); \
unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \
int __ccode; \
asm volatile ("rrbe 0,%1\n\t" \
"ipm %0\n\t" \
"srl %0,28\n\t" \
: "=d" (__ccode) : "a" (__physpage) : "cc" ); \
(__ccode & 2); \
})
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
/*
 * Build a pte from a physical page address plus protection bits.
 * physpage must be page aligned; the pgprot bits occupy the low
 * (non-PAGE_MASK) part of the entry, so a plain addition combines
 * the two without overlap.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t result;

	pte_val(result) = physpage + pgprot_val(pgprot);
	return result;
}
/* Build a pte for a struct page: page -> pfn -> physical address,
 * then combine with the protection bits via mk_pte_phys. */
#define mk_pte(pg, pgprot) \
({ \
struct page *__page = (pg); \
pgprot_t __pgprot = (pgprot); \
unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \
pte_t __pte = mk_pte_phys(__physpage, __pgprot); \
__pte; \
})
/* Same as mk_pte but starting from a page frame number. */
#define pfn_pte(pfn, pgprot) \
({ \
pgprot_t __pgprot = (pgprot); \
unsigned long __physpage = __pa((pfn) << PAGE_SHIFT); \
pte_t __pte = mk_pte_phys(__physpage, __pgprot); \
__pte; \
})
/* On the first transition to uptodate, also drop the storage-key
 * dirty bit so freshly read pages do not appear modified. */
#define SetPageUptodate(_page) \
do { \
struct page *__page = (_page); \
if (!test_and_set_bit(PG_uptodate, &__page->flags)) \
page_test_and_clear_dirty(_page); \
} while (0)
#ifdef __s390x__
/* 64 bit only: build a pmd entry from a page frame number. */
#define pfn_pmd(pfn, pgprot) \
({ \
pgprot_t __pgprot = (pgprot); \
unsigned long __physpage = __pa((pfn) << PAGE_SHIFT); \
pmd_t __pmd = __pmd(__physpage + pgprot_val(__pgprot)); \
__pmd; \
})
#endif /* __s390x__ */
/* pte/pmd/pgd -> frame number / struct page / table address helpers. */
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pmd_page_kernel(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_page(pmd) (mem_map+(pmd_val(pmd) >> PAGE_SHIFT))
#define pgd_page_kernel(pgd) (pgd_val(pgd) & PAGE_MASK)
/* to find an entry in a page-table-directory */
#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#ifndef __s390x__
/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
return (pmd_t *) dir;
}
#else /* __s390x__ */
/* Find an entry in the second-level page table.. */
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pmd_offset(dir,addr) \
((pmd_t *) pgd_page_kernel(*(dir)) + pmd_index(addr))
#endif /* __s390x__ */
/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) \
((pte_t *) pmd_page_kernel(*(pmd)) + pte_index(address))
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
/*
* 31 bit swap entry format:
* A page-table entry has some bits we have to treat in a special way.
* Bits 0, 20 and bit 23 have to be zero, otherwise a specification
* exception will occur instead of a page translation exception. The
* specification exception has the bad habit not to store necessary
* information in the lowcore.
* Bit 21 and bit 22 are the page invalid bit and the page protection
* bit. We set both to indicate a swapped page.
* Bit 30 and 31 are used to distinguish the different page types. For
* a swapped page these bits need to be zero.
* This leaves the bits 1-19 and bits 24-29 to store type and offset.
* We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
* plus 24 for the offset.
* 0| offset |0110|o|type |00|
* 0 0000000001111111111 2222 2 22222 33
* 0 1234567890123456789 0123 4 56789 01
*
* 64 bit swap entry format:
* A page-table entry has some bits we have to treat in a special way.
* Bits 52 and bit 55 have to be zero, otherwise a specification
* exception will occur instead of a page translation exception. The
* specification exception has the bad habit not to store necessary
* information in the lowcore.
* Bit 53 and bit 54 are the page invalid bit and the page protection
* bit. We set both to indicate a swapped page.
* Bit 62 and 63 are used to distinguish the different page types. For
* a swapped page these bits need to be zero.
* This leaves the bits 0-51 and bits 56-61 to store type and offset.
* We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
* plus 56 for the offset.
* | offset |0110|o|type |00|
* 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
* 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
*/
/*
 * Pack a swap entry into a pte following the layout documented above.
 * (Shift counts below are from the least significant bit, unlike the
 * big-endian bit numbering used in the layout comment.) The 5-bit
 * type goes to shift 2, offset bit 0 to shift 7, the remaining
 * offset bits start at shift 11, and _PAGE_INVALID_SWAP marks the
 * pte as a swapped-out page.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
pte_t pte;
pte_val(pte) = _PAGE_INVALID_SWAP | ((type & 0x1f) << 2) |
((offset & 1) << 7) | ((offset & 0xffffe) << 11);
return pte;
}
/* Decode type/offset from a swap entry; exact inverses of the
 * mk_swap_pte packing above. */
#define __swp_type(entry) (((entry).val >> 2) & 0x1f)
#define __swp_offset(entry) (((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#ifndef __s390x__
# define PTE_FILE_MAX_BITS 26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS 59
#endif /* __s390x__ */
/* Non-linear file mappings: the 7 low offset bits live at shift 1,
 * the remaining bits at shift 12, with the "file" invalid marker. */
#define pte_to_pgoff(__pte) \
((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))
#define pgoff_to_pte(__off) \
((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
| _PAGE_INVALID_FILE })
#endif /* !__ASSEMBLY__ */
#define kern_addr_valid(addr) (1)
/*
* No page table caches to initialise
*/
#define pgtable_cache_init() do { } while (0)
#define __HAVE_ARCH_PTEP_ESTABLISH
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#include <asm-generic/pgtable.h>
#endif /* _S390_PAGE_H */

View File

@@ -0,0 +1,34 @@
/*
* include/asm-s390/poll.h
*
* S390 version
*
* Derived from "include/asm-i386/poll.h"
*/
#ifndef __S390_POLL_H
#define __S390_POLL_H
/* These are specified by iBCS2 */
#define POLLIN 0x0001
#define POLLPRI 0x0002
#define POLLOUT 0x0004
#define POLLERR 0x0008
#define POLLHUP 0x0010
#define POLLNVAL 0x0020
/* The rest seem to be more-or-less nonstandard. Check them! */
#define POLLRDNORM 0x0040
#define POLLRDBAND 0x0080
#define POLLWRNORM 0x0100
#define POLLWRBAND 0x0200
#define POLLMSG 0x0400
#define POLLREMOVE 0x1000
/* Descriptor/event pair passed to and returned by the poll() syscall. */
struct pollfd {
int fd;           /* file descriptor to poll */
short events;     /* requested events (POLL* bits above) */
short revents;    /* events that actually occurred */
};
#endif

View File

@@ -0,0 +1,99 @@
/*
* include/asm-s390/posix_types.h
*
* S390 version
*
* Derived from "include/asm-i386/posix_types.h"
*/
#ifndef __ARCH_S390_POSIX_TYPES_H
#define __ARCH_S390_POSIX_TYPES_H
/*
* This file is generally used by user-level software, so you need to
* be a little careful about namespace pollution etc. Also, we cannot
* assume GCC is being used.
*/
typedef long __kernel_off_t;
typedef int __kernel_pid_t;
typedef unsigned long __kernel_size_t;
typedef long __kernel_time_t;
typedef long __kernel_suseconds_t;
typedef long __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef int __kernel_daddr_t;
typedef char * __kernel_caddr_t;
typedef unsigned short __kernel_uid16_t;
typedef unsigned short __kernel_gid16_t;
#ifdef __GNUC__
typedef long long __kernel_loff_t;
#endif
#ifndef __s390x__
typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
typedef unsigned short __kernel_nlink_t;
typedef unsigned short __kernel_ipc_pid_t;
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
typedef unsigned int __kernel_uid32_t;
typedef unsigned int __kernel_gid32_t;
typedef unsigned short __kernel_old_uid_t;
typedef unsigned short __kernel_old_gid_t;
typedef unsigned short __kernel_old_dev_t;
#else /* __s390x__ */
typedef unsigned int __kernel_ino_t;
typedef unsigned int __kernel_mode_t;
typedef unsigned int __kernel_nlink_t;
typedef int __kernel_ipc_pid_t;
typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
typedef long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
typedef __kernel_uid_t __kernel_old_uid_t;
typedef __kernel_gid_t __kernel_old_gid_t;
typedef __kernel_uid_t __kernel_uid32_t;
typedef __kernel_gid_t __kernel_gid32_t;
typedef unsigned short __kernel_old_dev_t;
#endif /* __s390x__ */
typedef struct {
#if defined(__KERNEL__) || defined(__USE_ALL)
int val[2];
#else /* !defined(__KERNEL__) && !defined(__USE_ALL)*/
int __val[2];
#endif /* !defined(__KERNEL__) && !defined(__USE_ALL)*/
} __kernel_fsid_t;
#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
#ifndef _S390_BITOPS_H
#include <asm/bitops.h>
#endif
#undef __FD_SET
#define __FD_SET(fd,fdsetp) set_bit(fd,fdsetp->fds_bits)
#undef __FD_CLR
#define __FD_CLR(fd,fdsetp) clear_bit(fd,fdsetp->fds_bits)
#undef __FD_ISSET
#define __FD_ISSET(fd,fdsetp) test_bit(fd,fdsetp->fds_bits)
#undef __FD_ZERO
#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)*/
#endif

View File

@@ -0,0 +1,355 @@
/*
* include/asm-s390/processor.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Hartmut Penner (hp@de.ibm.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/processor.h"
* Copyright (C) 1994, Linus Torvalds
*/
#ifndef __ASM_S390_PROCESSOR_H
#define __ASM_S390_PROCESSOR_H
#include <asm/page.h>
#include <asm/ptrace.h>
#ifdef __KERNEL__
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
#define current_text_addr() ({ void *pc; __asm__("basr %0,0":"=a"(pc)); pc; })
/*
* CPU type and hardware bug flags. Kept separately for each CPU.
* Members of this structure are referenced in head.S, so think twice
* before touching them. [mj]
*/
typedef struct
{
unsigned int version : 8;
unsigned int ident : 24;
unsigned int machine : 16;
unsigned int unused : 16;
} __attribute__ ((packed)) cpuid_t;
struct cpuinfo_S390
{
cpuid_t cpu_id;
__u16 cpu_addr;
__u16 cpu_nr;
unsigned long loops_per_jiffy;
unsigned long *pgd_quick;
#ifdef __s390x__
unsigned long *pmd_quick;
#endif /* __s390x__ */
unsigned long *pte_quick;
unsigned long pgtable_cache_sz;
};
extern void print_cpu_info(struct cpuinfo_S390 *);
/* Lazy FPU handling on uni-processor */
extern struct task_struct *last_task_used_math;
/*
* User space process size: 2GB for 31 bit, 4TB for 64 bit.
*/
#ifndef __s390x__
# define TASK_SIZE (0x80000000UL)
# define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
# define DEFAULT_TASK_SIZE (0x80000000UL)
#else /* __s390x__ */
# define TASK_SIZE (test_thread_flag(TIF_31BIT) ? \
(0x80000000UL) : (0x40000000000UL))
# define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
# define DEFAULT_TASK_SIZE (0x40000000000UL)
#endif /* __s390x__ */
#define MM_VM_SIZE(mm) DEFAULT_TASK_SIZE
#define HAVE_ARCH_PICK_MMAP_LAYOUT
typedef struct {
__u32 ar4;
} mm_segment_t;
/*
* Thread structure
*/
struct thread_struct {
s390_fp_regs fp_regs;
unsigned int acrs[NUM_ACRS];
unsigned long ksp; /* kernel stack pointer */
unsigned long user_seg; /* HSTD */
mm_segment_t mm_segment;
unsigned long prot_addr; /* address of protection-excep. */
unsigned int error_code; /* error-code of last prog-excep. */
unsigned int trap_no;
per_struct per_info;
/* Used to give failing instruction back to user for ieee exceptions */
unsigned long ieee_instruction_pointer;
/* pfault_wait is used to block the process on a pfault event */
unsigned long pfault_wait;
};
typedef struct thread_struct thread_struct;
/*
* Stack layout of a C stack frame.
*/
#ifndef __PACK_STACK
struct stack_frame {
unsigned long back_chain;
unsigned long empty1[5];
unsigned long gprs[10];
unsigned int empty2[8];
};
#else
struct stack_frame {
unsigned long empty1[5];
unsigned int empty2[8];
unsigned long gprs[10];
unsigned long back_chain;
};
#endif
#define ARCH_MIN_TASKALIGN 8
#ifndef __s390x__
# define __SWAPPER_PG_DIR __pa(&swapper_pg_dir[0]) + _SEGMENT_TABLE
#else /* __s390x__ */
# define __SWAPPER_PG_DIR __pa(&swapper_pg_dir[0]) + _REGION_TABLE
#endif /* __s390x__ */
#define INIT_THREAD {{0,{{0},{0},{0},{0},{0},{0},{0},{0},{0},{0}, \
{0},{0},{0},{0},{0},{0}}}, \
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, \
sizeof(init_stack) + (unsigned long) &init_stack, \
__SWAPPER_PG_DIR, \
{0}, \
0,0,0, \
(per_struct) {{{{0,}}},0,0,0,0,{{0,}}}, \
0, 0 \
}
/*
* Do necessary setup to start up a new thread.
*/
#ifndef __s390x__
#define start_thread(regs, new_psw, new_stackp) do { \
regs->psw.mask = PSW_USER_BITS; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp ; \
} while (0)
#else /* __s390x__ */
#define start_thread(regs, new_psw, new_stackp) do { \
regs->psw.mask = PSW_USER_BITS; \
regs->psw.addr = new_psw; \
regs->gprs[15] = new_stackp; \
} while (0)
#define start_thread31(regs, new_psw, new_stackp) do { \
regs->psw.mask = PSW_USER32_BITS; \
regs->psw.addr = new_psw; \
regs->gprs[15] = new_stackp; \
} while (0)
#endif /* __s390x__ */
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0)
/*
* Return saved PC of a blocked thread.
*/
extern unsigned long thread_saved_pc(struct task_struct *t);
/*
* Print register of task into buffer. Used in fs/proc/array.c.
*/
extern char *task_show_regs(struct task_struct *task, char *buffer);
extern void show_registers(struct pt_regs *regs);
extern void show_trace(struct task_struct *task, unsigned long *sp);
unsigned long get_wchan(struct task_struct *p);
#define __KSTK_PTREGS(tsk) ((struct pt_regs *) \
((unsigned long) tsk->thread_info + THREAD_SIZE - sizeof(struct pt_regs)))
#define KSTK_EIP(tsk) (__KSTK_PTREGS(tsk)->psw.addr)
#define KSTK_ESP(tsk) (__KSTK_PTREGS(tsk)->gprs[15])
/*
* Give up the time slice of the virtual CPU.
*/
#ifndef __s390x__
# define cpu_relax() asm volatile ("diag 0,0,68" : : : "memory")
#else /* __s390x__ */
# define cpu_relax() \
asm volatile ("ex 0,%0" : : "i" (__LC_DIAG44_OPCODE) : "memory")
#endif /* __s390x__ */
/*
* Set PSW mask to specified value, while leaving the
* PSW addr pointing to the next instruction.
*/
static inline void __load_psw_mask (unsigned long mask)
{
unsigned long addr;
psw_t psw;
/* Build a PSW with the requested mask whose address points at the
 * instruction right after the PSW load, so execution simply
 * continues with the new mask in effect. */
psw.mask = mask;
#ifndef __s390x__
/* 31 bit: basr/ahi compute the address of local label 1, which is
 * stored into the PSW address word before lpsw loads it. */
asm volatile (
" basr %0,0\n"
"0: ahi %0,1f-0b\n"
" st %0,4(%1)\n"
" lpsw 0(%1)\n"
"1:"
: "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc" );
#else /* __s390x__ */
/* 64 bit: larl takes the label address directly; lpswe loads the
 * 16-byte extended PSW. */
asm volatile (
" larl %0,1f\n"
" stg %0,8(%1)\n"
" lpswe 0(%1)\n"
"1:"
: "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc" );
#endif /* __s390x__ */
}
/*
* Stop the processor until an interrupt occurs (enabled wait state)
*/
static inline void enabled_wait(void)
{
unsigned long reg;
psw_t wait_psw;
/* Wait PSW: enabled for I/O, external and machine-check interrupts,
 * with the wait bit set. Its continuation address is patched to the
 * instruction after the PSW load, so any interrupt resumes there. */
wait_psw.mask = PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT |
PSW_MASK_MCHECK | PSW_MASK_WAIT;
#ifndef __s390x__
/* 31 bit: compute the label address, store it into the PSW address
 * word; "oi ...,0x80" sets the high bit of that word (presumably the
 * 31-bit addressing-mode bit, cf. PSW_ADDR_AMODE). */
asm volatile (
" basr %0,0\n"
"0: la %0,1f-0b(%0)\n"
" st %0,4(%1)\n"
" oi 4(%1),0x80\n"
" lpsw 0(%1)\n"
"1:"
: "=&a" (reg) : "a" (&wait_psw), "m" (wait_psw)
: "memory", "cc" );
#else /* __s390x__ */
asm volatile (
" larl %0,0f\n"
" stg %0,8(%1)\n"
" lpswe 0(%1)\n"
"0:"
: "=&a" (reg) : "a" (&wait_psw), "m" (wait_psw)
: "memory", "cc" );
#endif /* __s390x__ */
}
/*
* Function to drop a processor into disabled wait state
*/
static inline void disabled_wait(unsigned long code)
{
char psw_buffer[2*sizeof(psw_t)];
unsigned long ctl_buf;
/* Align a psw_t inside the oversized buffer by hand; the "address"
 * field of the disabled-wait PSW carries the caller's exit code so
 * it is visible to an operator/debugger after the stop. */
psw_t *dw_psw = (psw_t *)(((unsigned long) &psw_buffer+sizeof(psw_t)-1)
& -sizeof(psw_t));
dw_psw->mask = PSW_BASE_BITS | PSW_MASK_WAIT;
dw_psw->addr = code;
/*
 * Store status and then load disabled wait psw,
 * the processor is dead afterwards
 */
#ifndef __s390x__
asm volatile (" stctl 0,0,0(%2)\n"
" ni 0(%2),0xef\n" /* switch off protection */
" lctl 0,0,0(%2)\n"
" stpt 0xd8\n" /* store timer */
" stckc 0xe0\n" /* store clock comparator */
" stpx 0x108\n" /* store prefix register */
" stam 0,15,0x120\n" /* store access registers */
" std 0,0x160\n" /* store f0 */
" std 2,0x168\n" /* store f2 */
" std 4,0x170\n" /* store f4 */
" std 6,0x178\n" /* store f6 */
" stm 0,15,0x180\n" /* store general registers */
" stctl 0,15,0x1c0\n" /* store control registers */
" oi 0x1c0,0x10\n" /* fake protection bit */
" lpsw 0(%1)"
: "=m" (ctl_buf)
: "a" (dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" );
#else /* __s390x__ */
/* 64 bit: the save area lies 0x1000 bytes into the lowcore; register
 * 1 holds that base for all of the store instructions below. */
asm volatile (" stctg 0,0,0(%2)\n"
" ni 4(%2),0xef\n" /* switch off protection */
" lctlg 0,0,0(%2)\n"
" lghi 1,0x1000\n"
" stpt 0x328(1)\n" /* store timer */
" stckc 0x330(1)\n" /* store clock comparator */
" stpx 0x318(1)\n" /* store prefix register */
" stam 0,15,0x340(1)\n" /* store access registers */
" stfpc 0x31c(1)\n" /* store fpu control */
" std 0,0x200(1)\n" /* store f0 */
" std 1,0x208(1)\n" /* store f1 */
" std 2,0x210(1)\n" /* store f2 */
" std 3,0x218(1)\n" /* store f3 */
" std 4,0x220(1)\n" /* store f4 */
" std 5,0x228(1)\n" /* store f5 */
" std 6,0x230(1)\n" /* store f6 */
" std 7,0x238(1)\n" /* store f7 */
" std 8,0x240(1)\n" /* store f8 */
" std 9,0x248(1)\n" /* store f9 */
" std 10,0x250(1)\n" /* store f10 */
" std 11,0x258(1)\n" /* store f11 */
" std 12,0x260(1)\n" /* store f12 */
" std 13,0x268(1)\n" /* store f13 */
" std 14,0x270(1)\n" /* store f14 */
" std 15,0x278(1)\n" /* store f15 */
" stmg 0,15,0x280(1)\n" /* store general registers */
" stctg 0,15,0x380(1)\n" /* store control registers */
" oi 0x384(1),0x10\n" /* fake protection bit */
" lpswe 0(%1)"
: "=m" (ctl_buf)
: "a" (dw_psw), "a" (&ctl_buf),
"m" (dw_psw) : "cc", "0", "1");
#endif /* __s390x__ */
}
/*
* CPU idle notifier chain.
*/
#define CPU_IDLE 0
#define CPU_NOT_IDLE 1
struct notifier_block;
int register_idle_notifier(struct notifier_block *nb);
int unregister_idle_notifier(struct notifier_block *nb);
#endif
#endif /* __ASM_S390_PROCESSOR_H */

View File

@@ -0,0 +1,475 @@
/*
* include/asm-s390/ptrace.h
*
* S390 version
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
*/
#ifndef _S390_PTRACE_H
#define _S390_PTRACE_H
/*
* Offsets in the user_regs_struct. They are used for the ptrace
* system call and in entry.S
*/
#ifndef __s390x__
#define PT_PSWMASK 0x00
#define PT_PSWADDR 0x04
#define PT_GPR0 0x08
#define PT_GPR1 0x0C
#define PT_GPR2 0x10
#define PT_GPR3 0x14
#define PT_GPR4 0x18
#define PT_GPR5 0x1C
#define PT_GPR6 0x20
#define PT_GPR7 0x24
#define PT_GPR8 0x28
#define PT_GPR9 0x2C
#define PT_GPR10 0x30
#define PT_GPR11 0x34
#define PT_GPR12 0x38
#define PT_GPR13 0x3C
#define PT_GPR14 0x40
#define PT_GPR15 0x44
#define PT_ACR0 0x48
#define PT_ACR1 0x4C
#define PT_ACR2 0x50
#define PT_ACR3 0x54
#define PT_ACR4 0x58
#define PT_ACR5 0x5C
#define PT_ACR6 0x60
#define PT_ACR7 0x64
#define PT_ACR8 0x68
#define PT_ACR9 0x6C
#define PT_ACR10 0x70
#define PT_ACR11 0x74
#define PT_ACR12 0x78
#define PT_ACR13 0x7C
#define PT_ACR14 0x80
#define PT_ACR15 0x84
#define PT_ORIGGPR2 0x88
#define PT_FPC 0x90
/*
* A nasty fact of life that the ptrace api
* only supports passing of longs.
*/
#define PT_FPR0_HI 0x98
#define PT_FPR0_LO 0x9C
#define PT_FPR1_HI 0xA0
#define PT_FPR1_LO 0xA4
#define PT_FPR2_HI 0xA8
#define PT_FPR2_LO 0xAC
#define PT_FPR3_HI 0xB0
#define PT_FPR3_LO 0xB4
#define PT_FPR4_HI 0xB8
#define PT_FPR4_LO 0xBC
#define PT_FPR5_HI 0xC0
#define PT_FPR5_LO 0xC4
#define PT_FPR6_HI 0xC8
#define PT_FPR6_LO 0xCC
#define PT_FPR7_HI 0xD0
#define PT_FPR7_LO 0xD4
#define PT_FPR8_HI 0xD8
#define PT_FPR8_LO 0XDC
#define PT_FPR9_HI 0xE0
#define PT_FPR9_LO 0xE4
#define PT_FPR10_HI 0xE8
#define PT_FPR10_LO 0xEC
#define PT_FPR11_HI 0xF0
#define PT_FPR11_LO 0xF4
#define PT_FPR12_HI 0xF8
#define PT_FPR12_LO 0xFC
#define PT_FPR13_HI 0x100
#define PT_FPR13_LO 0x104
#define PT_FPR14_HI 0x108
#define PT_FPR14_LO 0x10C
#define PT_FPR15_HI 0x110
#define PT_FPR15_LO 0x114
#define PT_CR_9 0x118
#define PT_CR_10 0x11C
#define PT_CR_11 0x120
#define PT_IEEE_IP 0x13C
#define PT_LASTOFF PT_IEEE_IP
#define PT_ENDREGS 0x140-1
#define GPR_SIZE 4
#define CR_SIZE 4
#define STACK_FRAME_OVERHEAD 96 /* size of minimum stack frame */
#else /* __s390x__ */
#define PT_PSWMASK 0x00
#define PT_PSWADDR 0x08
#define PT_GPR0 0x10
#define PT_GPR1 0x18
#define PT_GPR2 0x20
#define PT_GPR3 0x28
#define PT_GPR4 0x30
#define PT_GPR5 0x38
#define PT_GPR6 0x40
#define PT_GPR7 0x48
#define PT_GPR8 0x50
#define PT_GPR9 0x58
#define PT_GPR10 0x60
#define PT_GPR11 0x68
#define PT_GPR12 0x70
#define PT_GPR13 0x78
#define PT_GPR14 0x80
#define PT_GPR15 0x88
#define PT_ACR0 0x90
#define PT_ACR1 0x94
#define PT_ACR2 0x98
#define PT_ACR3 0x9C
#define PT_ACR4 0xA0
#define PT_ACR5 0xA4
#define PT_ACR6 0xA8
#define PT_ACR7 0xAC
#define PT_ACR8 0xB0
#define PT_ACR9 0xB4
#define PT_ACR10 0xB8
#define PT_ACR11 0xBC
#define PT_ACR12 0xC0
#define PT_ACR13 0xC4
#define PT_ACR14 0xC8
#define PT_ACR15 0xCC
#define PT_ORIGGPR2 0xD0
#define PT_FPC 0xD8
#define PT_FPR0 0xE0
#define PT_FPR1 0xE8
#define PT_FPR2 0xF0
#define PT_FPR3 0xF8
#define PT_FPR4 0x100
#define PT_FPR5 0x108
#define PT_FPR6 0x110
#define PT_FPR7 0x118
#define PT_FPR8 0x120
#define PT_FPR9 0x128
#define PT_FPR10 0x130
#define PT_FPR11 0x138
#define PT_FPR12 0x140
#define PT_FPR13 0x148
#define PT_FPR14 0x150
#define PT_FPR15 0x158
#define PT_CR_9 0x160
#define PT_CR_10 0x168
#define PT_CR_11 0x170
#define PT_IEEE_IP 0x1A8
#define PT_LASTOFF PT_IEEE_IP
#define PT_ENDREGS 0x1B0-1
#define GPR_SIZE 8
#define CR_SIZE 8
#define STACK_FRAME_OVERHEAD 160 /* size of minimum stack frame */
#endif /* __s390x__ */
#define NUM_GPRS 16
#define NUM_FPRS 16
#define NUM_CRS 16
#define NUM_ACRS 16
#define FPR_SIZE 8
#define FPC_SIZE 4
#define FPC_PAD_SIZE 4 /* gcc insists on aligning the fpregs */
#define ACR_SIZE 4
#define PTRACE_OLDSETOPTIONS 21
#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <asm/setup.h>
typedef union
{
float f;
double d;
__u64 ui;
struct
{
__u32 hi;
__u32 lo;
} fp;
} freg_t;
typedef struct
{
__u32 fpc;
freg_t fprs[NUM_FPRS];
} s390_fp_regs;
#define FPC_EXCEPTION_MASK 0xF8000000
#define FPC_FLAGS_MASK 0x00F80000
#define FPC_DXC_MASK 0x0000FF00
#define FPC_RM_MASK 0x00000003
#define FPC_VALID_MASK 0xF8F8FF03
/* this typedef defines how a Program Status Word looks like */
typedef struct
{
unsigned long mask;
unsigned long addr;
} __attribute__ ((aligned(8))) psw_t;
#ifndef __s390x__
#define PSW_MASK_PER 0x40000000UL
#define PSW_MASK_DAT 0x04000000UL
#define PSW_MASK_IO 0x02000000UL
#define PSW_MASK_EXT 0x01000000UL
#define PSW_MASK_KEY 0x00F00000UL
#define PSW_MASK_MCHECK 0x00040000UL
#define PSW_MASK_WAIT 0x00020000UL
#define PSW_MASK_PSTATE 0x00010000UL
#define PSW_MASK_ASC 0x0000C000UL
#define PSW_MASK_CC 0x00003000UL
#define PSW_MASK_PM 0x00000F00UL
#define PSW_ADDR_AMODE 0x80000000UL
#define PSW_ADDR_INSN 0x7FFFFFFFUL
#define PSW_BASE_BITS 0x00080000UL
#define PSW_ASC_PRIMARY 0x00000000UL
#define PSW_ASC_ACCREG 0x00004000UL
#define PSW_ASC_SECONDARY 0x00008000UL
#define PSW_ASC_HOME 0x0000C000UL
#else /* __s390x__ */
#define PSW_MASK_PER 0x4000000000000000UL
#define PSW_MASK_DAT 0x0400000000000000UL
#define PSW_MASK_IO 0x0200000000000000UL
#define PSW_MASK_EXT 0x0100000000000000UL
#define PSW_MASK_KEY 0x00F0000000000000UL
#define PSW_MASK_MCHECK 0x0004000000000000UL
#define PSW_MASK_WAIT 0x0002000000000000UL
#define PSW_MASK_PSTATE 0x0001000000000000UL
#define PSW_MASK_ASC 0x0000C00000000000UL
#define PSW_MASK_CC 0x0000300000000000UL
#define PSW_MASK_PM 0x00000F0000000000UL
#define PSW_ADDR_AMODE 0x0000000000000000UL
#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
#define PSW_BASE_BITS 0x0000000180000000UL
#define PSW_BASE32_BITS 0x0000000080000000UL
#define PSW_ASC_PRIMARY 0x0000000000000000UL
#define PSW_ASC_ACCREG 0x0000400000000000UL
#define PSW_ASC_SECONDARY 0x0000800000000000UL
#define PSW_ASC_HOME 0x0000C00000000000UL
#define PSW_USER32_BITS (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \
PSW_MASK_PSTATE)
#endif /* __s390x__ */
#define PSW_KERNEL_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY)
#define PSW_USER_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \
PSW_MASK_PSTATE)
/* This macro merges a NEW PSW mask specified by the user into
the currently active PSW mask CURRENT, modifying only those
bits in CURRENT that the user may be allowed to change: this
is the condition code and the program mask bits. */
#define PSW_MASK_MERGE(CURRENT,NEW) \
(((CURRENT) & ~(PSW_MASK_CC|PSW_MASK_PM)) | \
((NEW) & (PSW_MASK_CC|PSW_MASK_PM)))
/*
* The s390_regs structure is used to define the elf_gregset_t.
*/
typedef struct
{
psw_t psw;
unsigned long gprs[NUM_GPRS];
unsigned int acrs[NUM_ACRS];
unsigned long orig_gpr2;
} s390_regs;
#ifdef __KERNEL__
/*
 * The pt_regs struct defines the way the registers are stored on
 * the stack during a system call.
 */
struct pt_regs
{
unsigned long args[1];		/* argument area */
psw_t psw;			/* saved program status word */
unsigned long gprs[NUM_GPRS];	/* saved general purpose registers */
unsigned long orig_gpr2;	/* original gpr 2 (see s390_regs) */
unsigned short ilc;		/* "ilc": likely instruction length code - confirm */
unsigned short trap;		/* NOTE(review): presumably trap/interruption number - confirm */
};
#endif
/*
 * Now for the program event recording (trace) definitions.
 */
/* Raw word view of the three PER control registers. */
typedef struct
{
unsigned long cr[3];
} per_cr_words;
#define PER_EM_MASK 0xE8000000UL
/* Bit-field view of the same PER control registers (bit order is
   the hardware's; do not reorder fields). */
typedef struct
{
#ifdef __s390x__
unsigned                       : 32;	/* upper half unused on 64 bit */
#endif /* __s390x__ */
unsigned em_branching          : 1;	/* event mask: successful branching */
unsigned em_instruction_fetch  : 1;	/* event mask: instruction fetch */
/*
 * Switching on storage alteration automatically fixes
 * the storage alteration event bit in the users std.
 */
unsigned em_storage_alteration : 1;	/* event mask: storage alteration */
unsigned em_gpr_alt_unused     : 1;	/* unused (see field name) */
unsigned em_store_real_address : 1;	/* event mask: store using real address */
unsigned                       : 3;
unsigned branch_addr_ctl       : 1;	/* branch address control */
unsigned                       : 1;
unsigned storage_alt_space_ctl : 1;	/* storage alteration space control */
unsigned                       : 21;
unsigned long starting_addr;		/* start of monitored address range */
unsigned long ending_addr;		/* end of monitored address range */
} per_cr_bits;
/* Raw view of the PER information the hardware leaves in lowcore. */
typedef struct
{
unsigned short perc_atmid;		/* PER code + ATMID, as one halfword */
unsigned long address;			/* PER address */
unsigned char access_id;		/* PER access id */
} per_lowcore_words;
/* Bit-field view of the same lowcore PER information. */
typedef struct
{
unsigned perc_branching          : 1;	/* PER code: branching event */
unsigned perc_instruction_fetch  : 1;	/* PER code: instruction fetch event */
unsigned perc_storage_alteration : 1;	/* PER code: storage alteration event */
unsigned perc_gpr_alt_unused     : 1;	/* unused (see field name) */
unsigned perc_store_real_address : 1;	/* PER code: store using real address */
unsigned                         : 3;
unsigned atmid_psw_bit_31        : 1;	/* ATMID: copies of assorted PSW bits */
unsigned atmid_validity_bit      : 1;
unsigned atmid_psw_bit_32        : 1;
unsigned atmid_psw_bit_5         : 1;
unsigned atmid_psw_bit_16        : 1;
unsigned atmid_psw_bit_17        : 1;
unsigned si                      : 2;
unsigned long address;			/* PER address */
unsigned                         : 4;
unsigned access_id               : 4;	/* PER access id */
} per_lowcore_bits;
/*
 * Per-task PER (program event recording) state: the control register
 * image, the single-step/tracing switches, and the last event data
 * copied from lowcore.
 */
typedef struct
{
union {
per_cr_words words;
per_cr_bits  bits;
} control_regs;		/* image of PER control registers */
/*
 * Use these flags instead of setting em_instruction_fetch
 * directly they are used so that single stepping can be
 * switched on & off while not affecting other tracing
 */
unsigned single_step       : 1;
unsigned instruction_fetch : 1;
unsigned                   : 30;
/*
 * These addresses are copied into cr10 & cr11 if single
 * stepping is switched off
 */
unsigned long starting_addr;
unsigned long ending_addr;
union {
per_lowcore_words words;
per_lowcore_bits  bits;
} lowcore;		/* data of the last PER event */
} per_struct;
/*
 * Describes a memory block to transfer for the PTRACE_*_AREA
 * requests defined below: len bytes between kernel_addr and
 * process_addr.
 */
typedef struct
{
unsigned int  len;		/* length of the area in bytes */
unsigned long kernel_addr;	/* address on the kernel side */
unsigned long process_addr;	/* address in the traced process */
} ptrace_area;
/*
 * S/390 specific non posix ptrace requests. I chose unusual values so
 * they are unlikely to clash with future ptrace definitions.
 * All six take a ptrace_area argument (see above).
 */
#define PTRACE_PEEKUSR_AREA           0x5000
#define PTRACE_POKEUSR_AREA           0x5001
#define PTRACE_PEEKTEXT_AREA	      0x5002
#define PTRACE_PEEKDATA_AREA	      0x5003
#define PTRACE_POKETEXT_AREA	      0x5004
#define PTRACE_POKEDATA_AREA 	      0x5005
/*
 * PT_PROT definition is loosely based on hppa bsd definition in
 * gdb/hppab-nat.c
 */
#define PTRACE_PROT                       21
/* Watchpoint operations for PTRACE_PROT. */
typedef enum
{
ptprot_set_access_watchpoint,	/* trap on any access in the range */
ptprot_set_write_watchpoint,	/* trap on writes in the range */
ptprot_disable_watchpoint	/* remove the watchpoint */
} ptprot_flags;
/* Argument block for PTRACE_PROT: address range plus operation. */
typedef struct
{
unsigned long lowaddr;		/* inclusive lower bound */
unsigned long hiaddr;		/* upper bound */
ptprot_flags prot;		/* requested operation */
} ptprot_area;
/* Sequence of bytes for breakpoint illegal instruction. */
#define S390_BREAKPOINT     {0x0,0x1}
#define S390_BREAKPOINT_U16 ((__u16)0x0001)
#define S390_SYSCALL_OPCODE ((__u16)0x0a00)	/* svc instruction opcode byte */
#define S390_SYSCALL_SIZE   2			/* svc instruction length in bytes */
/*
 * The user_regs_struct defines the way the user registers are
 * stored on the stack for signal handling.
 */
struct user_regs_struct
{
psw_t psw;			/* program status word */
unsigned long gprs[NUM_GPRS];	/* general purpose registers */
unsigned int  acrs[NUM_ACRS];	/* access registers */
unsigned long orig_gpr2;	/* original gpr 2 (see s390_regs) */
s390_fp_regs fp_regs;		/* floating point registers */
/*
 * These per registers are in here so that gdb can modify them
 * itself as there is no "official" ptrace interface for hardware
 * watchpoints. This is the way intel does it.
 */
per_struct per_info;
unsigned long ieee_instruction_pointer;
/* Used to give failing instruction back to user for ieee exceptions */
};
#ifdef __KERNEL__
#define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
#define profile_pc(regs) instruction_pointer(regs)
extern void show_regs(struct pt_regs * regs);
#endif
#endif /* __ASSEMBLY__ */
#endif /* _S390_PTRACE_H */

View File

@@ -0,0 +1,401 @@
/*
* linux/include/asm/qdio.h
*
* Linux for S/390 QDIO base support, Hipersocket base support
* version 2
*
* Copyright 2000,2002 IBM Corporation
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
*
*/
#ifndef __QDIO_H__
#define __QDIO_H__
#define VERSION_QDIO_H "$Revision: 1.57 $"
/* note, that most of the typedef's are from ingo. */
#include <linux/interrupt.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#define QDIO_NAME "qdio "
#ifndef __s390x__
#define QDIO_32_BIT
#endif /* __s390x__ */
/**** CONSTANTS, that are relied on without using these symbols *****/
#define QDIO_MAX_QUEUES_PER_IRQ 32 /* used in width of unsigned int */
/************************ END of CONSTANTS **************************/
#define QDIO_MAX_BUFFERS_PER_Q 128 /* must be a power of 2 (%x=&(x-1)*/
#define QDIO_BUF_ORDER 7 /* 2**this == number of pages used for sbals in 1 q */
#define QDIO_MAX_ELEMENTS_PER_BUFFER 16
#define SBAL_SIZE 256
#define QDIO_QETH_QFMT 0
#define QDIO_ZFCP_QFMT 1
#define QDIO_IQDIO_QFMT 2
/*
 * One entry of a storage block address list: flags, length and the
 * data address.  On 31 bit the extra "reserved" pointer pads the
 * struct so that addr sits at the same offset as on 64 bit.
 */
struct qdio_buffer_element{
unsigned int flags;
unsigned int length;
#ifdef QDIO_32_BIT
void *reserved;		/* pad: keeps layout identical to 64 bit */
#endif /* QDIO_32_BIT */
void *addr;		/* data address */
} __attribute__ ((packed,aligned(16)));
/* A QDIO buffer: 16 storage block address list entries (one SBAL). */
struct qdio_buffer{
volatile struct qdio_buffer_element element[16];
} __attribute__ ((packed,aligned(256)));
/*
 * Queue event callback installed via struct qdio_initialize.
 * params are: ccw_device, status, qdio_error, siga_error,
 * queue_number, first element processed, number of elements processed,
 * int_parm
 */
typedef void qdio_handler_t(struct ccw_device *,unsigned int,unsigned int,
			    unsigned int,unsigned int,int,int,unsigned long);
#define QDIO_STATUS_INBOUND_INT 0x01
#define QDIO_STATUS_OUTBOUND_INT 0x02
#define QDIO_STATUS_LOOK_FOR_ERROR 0x04
#define QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR 0x08
#define QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR 0x10
#define QDIO_STATUS_ACTIVATE_CHECK_CONDITION 0x20
#define QDIO_SIGA_ERROR_ACCESS_EXCEPTION 0x10
#define QDIO_SIGA_ERROR_B_BIT_SET 0x20
/* for qdio_initialize */
#define QDIO_INBOUND_0COPY_SBALS 0x01
#define QDIO_OUTBOUND_0COPY_SBALS 0x02
#define QDIO_USE_OUTBOUND_PCIS 0x04
/* for qdio_cleanup */
#define QDIO_FLAG_CLEANUP_USING_CLEAR 0x01
#define QDIO_FLAG_CLEANUP_USING_HALT 0x02
struct qdio_initialize {
struct ccw_device *cdev;
unsigned char q_format;
unsigned char adapter_name[8];
unsigned int qib_param_field_format; /*adapter dependent*/
/* pointer to 128 bytes or NULL, if no param field */
unsigned char *qib_param_field; /* adapter dependent */
/* pointer to no_queues*128 words of data or NULL */
unsigned long *input_slib_elements;
unsigned long *output_slib_elements;
unsigned int min_input_threshold;
unsigned int max_input_threshold;
unsigned int min_output_threshold;
unsigned int max_output_threshold;
unsigned int no_input_qs;
unsigned int no_output_qs;
qdio_handler_t *input_handler;
qdio_handler_t *output_handler;
unsigned long int_parm;
unsigned long flags;
void **input_sbal_addr_array; /* addr of n*128 void ptrs */
void **output_sbal_addr_array; /* addr of n*128 void ptrs */
};
extern int qdio_initialize(struct qdio_initialize *init_data);
extern int qdio_allocate(struct qdio_initialize *init_data);
extern int qdio_establish(struct qdio_initialize *init_data);
extern int qdio_activate(struct ccw_device *,int flags);
#define QDIO_STATE_MUST_USE_OUTB_PCI 0x00000001
#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */
#define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_initialize */
#define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */
#define QDIO_STATE_STOPPED 0x00000010 /* after queues went down */
extern unsigned long qdio_get_status(int irq);
#define QDIO_FLAG_SYNC_INPUT 0x01
#define QDIO_FLAG_SYNC_OUTPUT 0x02
#define QDIO_FLAG_UNDER_INTERRUPT 0x04
#define QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT 0x08 /* no effect on
adapter interrupts */
#define QDIO_FLAG_DONT_SIGA 0x10
extern int do_QDIO(struct ccw_device*, unsigned int flags,
unsigned int queue_number,
unsigned int qidx,unsigned int count,
struct qdio_buffer *buffers);
extern int qdio_synchronize(struct ccw_device*, unsigned int flags,
unsigned int queue_number);
extern int qdio_cleanup(struct ccw_device*, int how);
extern int qdio_shutdown(struct ccw_device*, int how);
extern int qdio_free(struct ccw_device*);
unsigned char qdio_get_slsb_state(struct ccw_device*, unsigned int flag,
unsigned int queue_number,
unsigned int qidx);
extern void qdio_init_scrubber(void);
/*
 * Queue descriptor, format 0.  On 31 bit each address is preceded by
 * a reserved word so the layout matches the 64-bit hardware format.
 */
struct qdesfmt0 {
#ifdef QDIO_32_BIT
	unsigned long res1;             /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long sliba;            /* storage-list-information-block
					   address */
#ifdef QDIO_32_BIT
	unsigned long res2;             /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long sla;              /* storage-list address */
#ifdef QDIO_32_BIT
	unsigned long res3;             /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long slsba;            /* storage-list-state-block address */
	unsigned int  res4;		/* reserved */
	unsigned int  akey  :  4;       /* access key for DLIB */
	unsigned int  bkey  :  4;       /* access key for SL */
	unsigned int  ckey  :  4;       /* access key for SBALs */
	unsigned int  dkey  :  4;       /* access key for SLSB */
	unsigned int  res5  : 16;       /* reserved */
} __attribute__ ((packed));
/*
 * Queue-Description record (QDR): header counts/sizes followed by up
 * to 126 format-0 queue descriptors.  Must be page aligned for the
 * hardware.
 */
struct qdr {
	unsigned int  qfmt    :  8;     /* queue format */
	unsigned int  pfmt    :  8;     /* impl. dep. parameter format */
	unsigned int  res1    :  8;     /* reserved */
	unsigned int  ac      :  8;     /* adapter characteristics */
	unsigned int  res2    :  8;     /* reserved */
	unsigned int  iqdcnt  :  8;     /* input-queue-descriptor count */
	unsigned int  res3    :  8;     /* reserved */
	unsigned int  oqdcnt  :  8;     /* output-queue-descriptor count */
	unsigned int  res4    :  8;     /* reserved */
	unsigned int  iqdsz   :  8;     /* input-queue-descriptor size */
	unsigned int  res5    :  8;     /* reserved */
	unsigned int  oqdsz   :  8;     /* output-queue-descriptor size */
	unsigned int  res6[9];          /* reserved */
#ifdef QDIO_32_BIT
	unsigned long res7;		/* reserved */
#endif /* QDIO_32_BIT */
	unsigned long qiba;             /* queue-information-block address */
	unsigned int  res8;             /* reserved */
	unsigned int  qkey    :  4;     /* queue-information-block key */
	unsigned int  res9    : 28;     /* reserved */
/*	union _qd {*/ /* why this? */
	struct qdesfmt0 qdf0[126];
/*	} qd;*/
} __attribute__ ((packed,aligned(4096)));
/*
 * queue information block (QIB)
 */
#define QIB_AC_INBOUND_PCI_SUPPORTED 	0x80
#define QIB_AC_OUTBOUND_PCI_SUPPORTED 	0x40
struct qib {
	unsigned int  qfmt    :  8;     /* queue format */
	unsigned int  pfmt    :  8;     /* impl. dep. parameter format */
	unsigned int  res1    :  8;     /* reserved */
	unsigned int  ac      :  8;     /* adapter characteristics */
	unsigned int  res2;             /* reserved */
#ifdef QDIO_32_BIT
	unsigned long res3;		/* reserved */
#endif /* QDIO_32_BIT */
	unsigned long isliba;           /* absolute address of 1st
					   input SLIB */
#ifdef QDIO_32_BIT
	unsigned long res4;		/* reserved */
#endif /* QDIO_32_BIT */
	unsigned long osliba;           /* absolute address of 1st
					   output SLIB */
	unsigned int  res5;             /* reserved */
	unsigned int  res6;             /* reserved */
	unsigned char ebcnam[8];        /* adapter identifier in EBCDIC */
	unsigned char res7[88];         /* reserved */
	unsigned char parm[QDIO_MAX_BUFFERS_PER_Q];
					/* implementation dependent
					   parameters */
} __attribute__ ((packed,aligned(256)));
/*
 * storage-list-information block element (SLIBE)
 */
struct slibe {
#ifdef QDIO_32_BIT
	unsigned long res;		/* reserved (31-bit layout pad) */
#endif /* QDIO_32_BIT */
	unsigned long parms;		/* implementation dependent
					   parameters */
};
/*
 * storage-list-information block (SLIB): chaining pointer, SL/SLSB
 * addresses and one SLIBE per buffer.
 */
struct slib {
#ifdef QDIO_32_BIT
	unsigned long res1;             /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long nsliba;           /* next SLIB address (if any) */
#ifdef QDIO_32_BIT
	unsigned long res2;             /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long sla;              /* SL address */
#ifdef QDIO_32_BIT
	unsigned long res3;             /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long slsba;            /* SLSB address */
	unsigned char res4[1000];       /* reserved */
	struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q];    /* SLIB elements */
} __attribute__ ((packed,aligned(2048)));
/* Bit-field view of an SBAL element's flag byte. */
struct sbal_flags {
	unsigned char res1  : 1;   /* reserved */
	unsigned char last  : 1;   /* last entry */
	unsigned char cont  : 1;   /* contiguous storage */
	unsigned char res2  : 1;   /* reserved */
	unsigned char frag  : 2;   /* fragmentation (s.below) */
	unsigned char res3  : 2;   /* reserved */
} __attribute__ ((packed));
/* Flag values for the (32-bit) flags word of an SBAL element. */
#define SBAL_FLAGS_FIRST_FRAG		0x04000000UL
#define SBAL_FLAGS_MIDDLE_FRAG		0x08000000UL
#define SBAL_FLAGS_LAST_FRAG		0x0c000000UL
#define SBAL_FLAGS_LAST_ENTRY		0x40000000UL
#define SBAL_FLAGS_CONTIGUOUS		0x20000000UL
#define SBAL_FLAGS0_DATA_CONTINUATION	0x20UL
/* Awesome OpenFCP extensions */
#define SBAL_FLAGS0_TYPE_STATUS		0x00UL
#define SBAL_FLAGS0_TYPE_WRITE		0x08UL
#define SBAL_FLAGS0_TYPE_READ		0x10UL
#define SBAL_FLAGS0_TYPE_WRITE_READ	0x18UL
#define SBAL_FLAGS0_MORE_SBALS		0x04UL
#define SBAL_FLAGS0_COMMAND		0x02UL
#define SBAL_FLAGS0_LAST_SBAL		0x00UL
#define SBAL_FLAGS0_ONLY_SBAL		SBAL_FLAGS0_COMMAND
#define SBAL_FLAGS0_MIDDLE_SBAL		SBAL_FLAGS0_MORE_SBALS
/*
 * Fully parenthesized: the previous expansion "a | b" without outer
 * parentheses mis-associates in arithmetic or bitwise contexts
 * (e.g. "x & SBAL_FLAGS0_FIRST_SBAL" or "2 * SBAL_FLAGS0_FIRST_SBAL").
 */
#define SBAL_FLAGS0_FIRST_SBAL		(SBAL_FLAGS0_MORE_SBALS | SBAL_FLAGS0_COMMAND)
/* Naught of interest beyond this point */
#define SBAL_FLAGS0_PCI		0x40
struct sbal_sbalf_0 {
unsigned char res1 : 1; /* reserved */
unsigned char pci : 1; /* PCI indicator */
unsigned char cont : 1; /* data continuation */
unsigned char sbtype: 2; /* storage-block type (OpenFCP) */
unsigned char res2 : 3; /* reserved */
} __attribute__ ((packed));
struct sbal_sbalf_1 {
unsigned char res1 : 4; /* reserved */
unsigned char key : 4; /* storage key */
} __attribute__ ((packed));
struct sbal_sbalf_14 {
unsigned char res1 : 4; /* reserved */
unsigned char erridx : 4; /* error index */
} __attribute__ ((packed));
struct sbal_sbalf_15 {
unsigned char reason; /* reserved */
} __attribute__ ((packed));
union sbal_sbalf {
struct sbal_sbalf_0 i0;
struct sbal_sbalf_1 i1;
struct sbal_sbalf_14 i14;
struct sbal_sbalf_15 i15;
unsigned char value;
};
/*
 * One storage-block access-list element: flag byte, SBALF byte, data
 * count and the absolute data address (padded on 31 bit so the
 * address offset matches 64 bit).
 */
struct sbal_element {
	union {
		struct sbal_flags  bits;       /* flags */
		unsigned char value;
	} flags;
	unsigned int  res1  : 16;   /* reserved */
	union sbal_sbalf  sbalf;       /* SBAL flags */
	unsigned int  res2  : 16;   /* reserved */
	unsigned int  count : 16;   /* data count */
#ifdef QDIO_32_BIT
	unsigned long res3;         /* reserved (31-bit layout pad) */
#endif /* QDIO_32_BIT */
	unsigned long addr;         /* absolute data address */
} __attribute__ ((packed,aligned(16)));
/*
 * storage-block access-list (SBAL)
 */
struct sbal {
	struct sbal_element element[QDIO_MAX_ELEMENTS_PER_BUFFER];
} __attribute__ ((packed,aligned(256)));
/*
 * storage-list (SL): one absolute SBAL address per buffer.
 */
struct sl_element {
#ifdef QDIO_32_BIT
        unsigned long res;     /* reserved (31-bit layout pad) */
#endif /* QDIO_32_BIT */
        unsigned long sbal;    /* absolute SBAL address */
} __attribute__ ((packed));
struct sl {
	struct sl_element element[QDIO_MAX_BUFFERS_PER_Q];
} __attribute__ ((packed,aligned(1024)));
/*
 * storage-list-state block (SLSB): one state byte per buffer, also
 * accessible through the owner/type/state bit-field view below.
 */
struct slsb_flags {
	unsigned char owner  : 2;   /* SBAL owner */
	unsigned char type   : 1;   /* buffer type */
	unsigned char state  : 5;   /* processing state */
} __attribute__ ((packed));
struct slsb {
	union {
		unsigned char val[QDIO_MAX_BUFFERS_PER_Q];
		struct slsb_flags flags[QDIO_MAX_BUFFERS_PER_Q];
	} acc;
} __attribute__ ((packed,aligned(256)));
/*
 * SLSB values: component codes first, then the combined per-buffer
 * state bytes (these appear to pack owner/type/state as laid out in
 * struct slsb_flags - confirm against the QDIO architecture).
 */
#define SLSB_OWNER_PROG 1
#define SLSB_OWNER_CU 2
#define SLSB_TYPE_INPUT 0
#define SLSB_TYPE_OUTPUT 1
#define SLSB_STATE_NOT_INIT 0
#define SLSB_STATE_EMPTY 1
#define SLSB_STATE_PRIMED 2
#define SLSB_STATE_HALTED 0xe
#define SLSB_STATE_ERROR 0xf
#define SLSB_P_INPUT_NOT_INIT     0x80
#define SLSB_P_INPUT_PROCESSING	  0x81
#define SLSB_CU_INPUT_EMPTY       0x41
#define SLSB_P_INPUT_PRIMED       0x82
#define SLSB_P_INPUT_HALTED       0x8E
#define SLSB_P_INPUT_ERROR        0x8F
#define SLSB_P_OUTPUT_NOT_INIT    0xA0
#define SLSB_P_OUTPUT_EMPTY       0xA1
#define SLSB_CU_OUTPUT_PRIMED     0x62
#define SLSB_P_OUTPUT_HALTED      0xAE
#define SLSB_P_OUTPUT_ERROR       0xAF
#define SLSB_ERROR_DURING_LOOKUP  0xFF
#endif /* __QDIO_H__ */

View File

@@ -0,0 +1,78 @@
/*
* include/asm-s390/qeth.h
*
* ioctl definitions for qeth driver
*
* Copyright (C) 2004 IBM Corporation
*
* Author(s): Thomas Spatzier <tspat@de.ibm.com>
*
*/
#ifndef __ASM_S390_QETH_IOCTL_H__
#define __ASM_S390_QETH_IOCTL_H__
#include <linux/ioctl.h>
#define SIOC_QETH_ARP_SET_NO_ENTRIES (SIOCDEVPRIVATE)
#define SIOC_QETH_ARP_QUERY_INFO (SIOCDEVPRIVATE + 1)
#define SIOC_QETH_ARP_ADD_ENTRY (SIOCDEVPRIVATE + 2)
#define SIOC_QETH_ARP_REMOVE_ENTRY (SIOCDEVPRIVATE + 3)
#define SIOC_QETH_ARP_FLUSH_CACHE (SIOCDEVPRIVATE + 4)
#define SIOC_QETH_ADP_SET_SNMP_CONTROL (SIOCDEVPRIVATE + 5)
#define SIOC_QETH_GET_CARD_TYPE (SIOCDEVPRIVATE + 6)
/* ARP cache entry passed in by the SIOC_QETH_ARP_ADD/REMOVE ioctls. */
struct qeth_arp_cache_entry {
	__u8 macaddr[6];
	__u8 reserved1[2];
	__u8 ipaddr[16]; /* for both IPv4 and IPv6 */
	__u8 reserved2[32];
} __attribute__ ((packed));
/* ARP query reply entry, format 7 (with media specific information). */
struct qeth_arp_qi_entry7 {
	__u8 media_specific[32];
	__u8 macaddr_type;
	__u8 ipaddr_type;
	__u8 macaddr[6];
	__u8 ipaddr[4];
} __attribute__((packed));
/* Format 7 without the media specific information (stripped form). */
struct qeth_arp_qi_entry7_short {
	__u8 macaddr_type;
	__u8 ipaddr_type;
	__u8 macaddr[6];
	__u8 ipaddr[4];
} __attribute__((packed));
/* ARP query reply entry, format 5 (no MAC address). */
struct qeth_arp_qi_entry5 {
	__u8 media_specific[32];
	__u8 macaddr_type;
	__u8 ipaddr_type;
	__u8 ipaddr[4];
} __attribute__((packed));
/* Format 5 without the media specific information (stripped form). */
struct qeth_arp_qi_entry5_short {
	__u8 macaddr_type;
	__u8 ipaddr_type;
	__u8 ipaddr[4];
} __attribute__((packed));
/*
* can be set by user if no "media specific information" is wanted
* -> saves a lot of space in user space buffer
*/
#define QETH_QARP_STRIP_ENTRIES 0x8000
#define QETH_QARP_REQUEST_MASK 0x00ff
/* data sent to user space as result of query arp ioctl */
#define QETH_QARP_USER_DATA_SIZE 20000
#define QETH_QARP_MASK_OFFSET 4
#define QETH_QARP_ENTRIES_OFFSET 6
/*
 * Header of the buffer exchanged by the query arp ioctl: the length
 * word is written by user space on the way in and overwritten with
 * the entry count by the kernel on the way out.
 */
struct qeth_arp_query_user_data {
	union {
		__u32 data_len;		/* set by user space program */
		__u32 no_entries;	/* set by kernel */
	} u;
	__u16 mask_bits;		/* QETH_QARP_* request/strip bits */
	char *entries;			/* start of the returned entry array */
} __attribute__((packed));
#endif /* __ASM_S390_QETH_IOCTL_H__ */

View File

@@ -0,0 +1,60 @@
/*
* include/asm-s390/resource.h
*
* S390 version
*
* Derived from "include/asm-i386/resources.h"
*/
#ifndef _S390_RESOURCE_H
#define _S390_RESOURCE_H
/*
* Resource limits
*/
#define RLIMIT_CPU 0 /* CPU time in ms */
#define RLIMIT_FSIZE 1 /* Maximum filesize */
#define RLIMIT_DATA 2 /* max data size */
#define RLIMIT_STACK 3 /* max stack size */
#define RLIMIT_CORE 4 /* max core file size */
#define RLIMIT_RSS 5 /* max resident set size */
#define RLIMIT_NPROC 6 /* max number of processes */
#define RLIMIT_NOFILE 7 /* max number of open files */
#define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
#define RLIMIT_AS 9 /* address space limit */
#define RLIMIT_LOCKS 10 /* maximum file locks held */
#define RLIMIT_SIGPENDING 11 /* max number of pending signals */
#define RLIMIT_MSGQUEUE 12 /* maximum bytes in POSIX mqueues */
#define RLIM_NLIMITS 13
/*
* SuS says limits have to be unsigned.
* Which makes a ton more sense anyway.
*/
#define RLIM_INFINITY (~0UL)
#ifdef __KERNEL__
#define INIT_RLIMITS \
{ \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ _STK_LIM, RLIM_INFINITY }, \
{ 0, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
{ MLOCK_LIMIT, MLOCK_LIMIT }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
{ MQ_BYTES_MAX, MQ_BYTES_MAX }, \
}
#endif /* __KERNEL__ */
#endif

View File

@@ -0,0 +1,355 @@
#ifndef _S390_RWSEM_H
#define _S390_RWSEM_H
/*
* include/asm-s390/rwsem.h
*
* S390 version
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
*/
/*
*
* The MSW of the count is the negated number of active writers and waiting
* lockers, and the LSW is the total number of active locks
*
* The lock count is initialized to 0 (no active and no waiting lockers).
*
* When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
* uncontended lock. This can be determined because XADD returns the old value.
* Readers increment by 1 and see a positive value when uncontended, negative
* if there are writers (and maybe) readers waiting (in which case it goes to
* sleep).
*
* The value of WAITING_BIAS supports up to 32766 waiting processes. This can
* be extended to 65534 by manually checking the whole MSW rather than relying
* on the S flag.
*
* The value of ACTIVE_BIAS supports up to 65535 active processes.
*
* This should be totally fair - if anything is waiting, a process that wants a
* lock will go to the back of the queue. When the currently active lock is
* released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
* front, then they'll all be woken up, but no other readers will be.
*/
#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif
#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/spinlock.h>
struct rwsem_waiter;
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);
/*
* the semaphore definition
*/
struct rw_semaphore {
	signed long		count;		/* MSW: -(writers+waiters), LSW: active locks (see above) */
	spinlock_t		wait_lock;	/* protects wait_list */
	struct list_head	wait_list;	/* queued rwsem_waiter entries */
};
#ifndef __s390x__
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#else /* __s390x__ */
#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
#endif /* __s390x__ */
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
/*
* initialisation
*/
#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
/*
 * Run-time initialisation of an rw_semaphore: no active or waiting
 * lockers and an empty wait queue.  Equivalent to
 * __RWSEM_INITIALIZER().
 */
static inline void init_rwsem(struct rw_semaphore *sem)
{
	INIT_LIST_HEAD(&sem->wait_list);
	spin_lock_init(&sem->wait_lock);
	sem->count = RWSEM_UNLOCKED_VALUE;
}
/*
 * lock for reading
 *
 * Adds RWSEM_ACTIVE_READ_BIAS to sem->count with a compare-and-swap
 * retry loop (cs/csg, 31- and 64-bit variants).  A negative old count
 * means a writer is active or waiting, so block in
 * rwsem_down_read_failed().
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	signed long old, new;
	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%3)\n"
		"0: lr   %1,%0\n"
		"   ahi  %1,%5\n"
		"   cs   %0,%1,0(%3)\n"
		"   jl   0b"
#else /* __s390x__ */
		"   lg   %0,0(%3)\n"
		"0: lgr  %1,%0\n"
		"   aghi %1,%5\n"
		"   csg  %0,%1,0(%3)\n"
		"   jl   0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" );
	if (old < 0)
		rwsem_down_read_failed(sem);
}
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 *
 * Same compare-and-swap loop as __down_read(), but bails out (jm)
 * instead of retrying when the count is negative, i.e. when a writer
 * holds or waits for the lock.  Never sleeps.
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	signed long old, new;
	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%3)\n"
		"0: ltr  %1,%0\n"
		"   jm   1f\n"
		"   ahi  %1,%5\n"
		"   cs   %0,%1,0(%3)\n"
		"   jl   0b\n"
		"1:"
#else /* __s390x__ */
		"   lg   %0,0(%3)\n"
		"0: ltgr %1,%0\n"
		"   jm   1f\n"
		"   aghi %1,%5\n"
		"   csg  %0,%1,0(%3)\n"
		"   jl   0b\n"
		"1:"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" );
	return old >= 0 ? 1 : 0;
}
/*
 * lock for writing
 *
 * Adds RWSEM_ACTIVE_WRITE_BIAS to sem->count via a compare-and-swap
 * loop.  A non-zero old count means the lock was not free (readers
 * active or writers queued), so block in rwsem_down_write_failed().
 * The bias is added from memory (tmp) since it does not fit an
 * immediate operand.
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = RWSEM_ACTIVE_WRITE_BIAS;
	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%3)\n"
		"0: lr   %1,%0\n"
		"   a    %1,%5\n"
		"   cs   %0,%1,0(%3)\n"
		"   jl   0b"
#else /* __s390x__ */
		"   lg   %0,0(%3)\n"
		"0: lgr  %1,%0\n"
		"   ag   %1,%5\n"
		"   csg  %0,%1,0(%3)\n"
		"   jl   0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
		: "cc", "memory" );
	if (old != 0)
		rwsem_down_write_failed(sem);
}
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 *
 * Single compare-and-swap of RWSEM_UNLOCKED_VALUE ->
 * RWSEM_ACTIVE_WRITE_BIAS; gives up (jnz) as soon as the count is
 * seen non-zero.  Never sleeps.
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	signed long old;
	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%2)\n"
		"0: ltr  %0,%0\n"
		"   jnz  1f\n"
		"   cs   %0,%4,0(%2)\n"
		"   jl   0b\n"
#else /* __s390x__ */
		"   lg   %0,0(%2)\n"
		"0: ltgr %0,%0\n"
		"   jnz  1f\n"
		"   csg  %0,%4,0(%2)\n"
		"   jl   0b\n"
#endif /* __s390x__ */
		"1:"
                : "=&d" (old), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory" );
	return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
}
/*
 * unlock after reading
 *
 * Subtracts the read bias via a compare-and-swap loop.  If the new
 * count is negative (waiters queued) and the active part dropped to
 * zero, this was the last active locker: wake the waiters.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	signed long old, new;
	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%3)\n"
		"0: lr   %1,%0\n"
		"   ahi  %1,%5\n"
		"   cs   %0,%1,0(%3)\n"
		"   jl   0b"
#else /* __s390x__ */
		"   lg   %0,0(%3)\n"
		"0: lgr  %1,%0\n"
		"   aghi %1,%5\n"
		"   csg  %0,%1,0(%3)\n"
		"   jl   0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "i" (-RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory" );
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}
/*
 * unlock after writing
 *
 * Subtracts the write bias (from memory, it doesn't fit an immediate)
 * via a compare-and-swap loop and wakes waiters when the active count
 * reaches zero while waiters are queued, mirroring __up_read().
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_ACTIVE_WRITE_BIAS;
	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%3)\n"
		"0: lr   %1,%0\n"
		"   a    %1,%5\n"
		"   cs   %0,%1,0(%3)\n"
		"   jl   0b"
#else /* __s390x__ */
		"   lg   %0,0(%3)\n"
		"0: lgr  %1,%0\n"
		"   ag   %1,%5\n"
		"   csg  %0,%1,0(%3)\n"
		"   jl   0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
		: "cc", "memory" );
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}
/*
 * downgrade write lock to read lock
 *
 * Removes the waiting-bias part of the write bias (leaving one active
 * read count) via a compare-and-swap loop.  A resulting count > 1
 * means there are queued waiters (readers) to wake.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_WAITING_BIAS;
	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%3)\n"
		"0: lr   %1,%0\n"
		"   a    %1,%5\n"
		"   cs   %0,%1,0(%3)\n"
		"   jl   0b"
#else /* __s390x__ */
		"   lg   %0,0(%3)\n"
		"0: lgr  %1,%0\n"
		"   ag   %1,%5\n"
		"   csg  %0,%1,0(%3)\n"
		"   jl   0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
		: "cc", "memory" );
	if (new > 1)
		rwsem_downgrade_wake(sem);
}
/*
 * implement atomic add functionality
 *
 * Adds delta to sem->count with a compare-and-swap retry loop; no
 * wakeup logic, used by the slow-path code in lib/rwsem.
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	signed long old, new;
	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%3)\n"
		"0: lr   %1,%0\n"
		"   ar   %1,%5\n"
		"   cs   %0,%1,0(%3)\n"
		"   jl   0b"
#else /* __s390x__ */
		"   lg   %0,0(%3)\n"
		"0: lgr  %1,%0\n"
		"   agr  %1,%5\n"
		"   csg  %0,%1,0(%3)\n"
		"   jl   0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "d" (delta)
		: "cc", "memory" );
}
/*
 * implement exchange and add functionality
 *
 * Same compare-and-swap add loop as rwsem_atomic_add(), but returns
 * the updated count (xadd-style, except the *new* value is returned).
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	signed long old, new;
	__asm__ __volatile__(
#ifndef __s390x__
		"   l    %0,0(%3)\n"
		"0: lr   %1,%0\n"
		"   ar   %1,%5\n"
		"   cs   %0,%1,0(%3)\n"
		"   jl   0b"
#else /* __s390x__ */
		"   lg   %0,0(%3)\n"
		"0: lgr  %1,%0\n"
		"   agr  %1,%5\n"
		"   csg  %0,%1,0(%3)\n"
		"   jl   0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "d" (delta)
		: "cc", "memory" );
	return new;
}
#endif /* __KERNEL__ */
#endif /* _S390_RWSEM_H */

View File

@@ -0,0 +1,34 @@
#ifndef _S390_EXTINT_H
#define _S390_EXTINT_H
/*
* include/asm-s390/s390_ext.h
*
* S390 version
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
/* Handler for an external interrupt, keyed by interruption code. */
typedef void (*ext_int_handler_t)(struct pt_regs *regs, __u16 code);

/*
 * Warning: if you change ext_int_info_t you have to change the
 * external interrupt handler in entry.S too.
 */
typedef struct ext_int_info_t {
	struct ext_int_info_t *next;	/* hash-chain link (see ext_int_hash) */
	ext_int_handler_t handler;	/* handler to call for this code */
	__u16 code;			/* external interruption code */
} __attribute__ ((packed)) ext_int_info_t;
extern ext_int_info_t *ext_int_hash[];
int register_external_interrupt(__u16 code, ext_int_handler_t handler);
int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
ext_int_info_t *info);
int unregister_external_interrupt(__u16 code, ext_int_handler_t handler);
int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
ext_int_info_t *info);
#endif

View File

@@ -0,0 +1,16 @@
#ifndef _ASMS390_SCATTERLIST_H
#define _ASMS390_SCATTERLIST_H
/* One DMA scatter/gather segment: a byte range within a page. */
struct scatterlist {
    struct page *page;		/* page containing the data */
    unsigned int offset;	/* byte offset within the page */
    unsigned int length;	/* length of the segment in bytes */
};
#ifdef __s390x__
#define ISA_DMA_THRESHOLD (0xffffffffffffffffUL)
#else
#define ISA_DMA_THRESHOLD (0xffffffffUL)
#endif
#endif /* _ASMS390_SCATTERLIST_H */

View File

@@ -0,0 +1,6 @@
#ifndef _S390_SECTIONS_H
#define _S390_SECTIONS_H
#include <asm-generic/sections.h>
#endif

View File

@@ -0,0 +1,4 @@
#ifndef _ASM_SEGMENT_H
#define _ASM_SEGMENT_H
#endif

View File

@@ -0,0 +1,110 @@
/*
* include/asm-s390/semaphore.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
*
* Derived from "include/asm-i386/semaphore.h"
* (C) Copyright 1996 Linus Torvalds
*/
#ifndef _S390_SEMAPHORE_H
#define _S390_SEMAPHORE_H
#include <asm/system.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
/* Counting semaphore: available count plus queue of sleepers. */
struct semaphore {
	/*
	 * Note that any negative value of count is equivalent to 0,
	 * but additionally indicates that some process(es) might be
	 * sleeping on `wait'.
	 */
	atomic_t count;
	wait_queue_head_t wait;
};
#define __SEMAPHORE_INITIALIZER(name,count) \
{ ATOMIC_INIT(count), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
#define __MUTEX_INITIALIZER(name) \
__SEMAPHORE_INITIALIZER(name,1)
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
/* Run-time initialisation to an arbitrary count. */
static inline void sema_init (struct semaphore *sem, int val)
{
	*sem = (struct semaphore) __SEMAPHORE_INITIALIZER((*sem),val);
}

/* Initialise as an unlocked mutex (count 1). */
static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

/* Initialise as a locked mutex (count 0). */
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);
/*
 * Acquire the semaphore, sleeping uninterruptibly in __down() when
 * the decremented count goes negative.  May sleep (might_sleep()).
 */
static inline void down(struct semaphore * sem)
{
	might_sleep();
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);
}
/*
 * Acquire the semaphore like down(), but sleep interruptibly.
 * Returns 0 on success, or the non-zero result of
 * __down_interruptible() when the sleep was interrupted by a signal.
 */
static inline int down_interruptible(struct semaphore * sem)
{
	might_sleep();
	if (atomic_dec_return(&sem->count) < 0)
		return __down_interruptible(sem);
	return 0;
}
/*
 * Try to acquire the semaphore without sleeping.  Returns 0 on
 * success, non-zero when the semaphore was not available.
 */
static inline int down_trylock(struct semaphore * sem)
{
	int old_val, new_val;

	/*
	 * This inline assembly atomically implements the equivalent
	 * to the following C code:
	 * old_val = sem->count.counter;
	 * if ((new_val = old_val) > 0)
	 *     sem->count.counter = --new_val;
	 * In the ppc code this is called atomic_dec_if_positive.
	 */
	__asm__ __volatile__ (
		"   l    %0,0(%3)\n"
		"0: ltr  %1,%0\n"
		"   jle  1f\n"
		"   ahi  %1,-1\n"
		"   cs   %0,%1,0(%3)\n"
		"   jl   0b\n"
		"1:"
		: "=&d" (old_val), "=&d" (new_val),
		  "=m" (sem->count.counter)
		: "a" (&sem->count.counter), "m" (sem->count.counter)
		: "cc", "memory" );
	return old_val <= 0;
}
/*
 * Release the semaphore; if the incremented count is still <= 0 there
 * are sleepers, so let __up() wake one.
 */
static inline void up(struct semaphore * sem)
{
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);
}
#endif

View File

@@ -0,0 +1,29 @@
#ifndef _S390_SEMBUF_H
#define _S390_SEMBUF_H
/*
* The semid64_ds structure for S/390 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 64-bit time_t to solve y2038 problem (for !__s390x__)
* - 2 miscellaneous 32-bit values
*/
/* See the file-top comment: the !__s390x__ pad words reserve room for
   64-bit time_t values. */
struct semid64_ds {
	struct ipc64_perm sem_perm;		/* permissions .. see ipc.h */
	__kernel_time_t	sem_otime;		/* last semop time */
#ifndef __s390x__
	unsigned long	__unused1;		/* reserved for 64-bit sem_otime */
#endif /* ! __s390x__ */
	__kernel_time_t	sem_ctime;		/* last change time */
#ifndef __s390x__
	unsigned long	__unused2;		/* reserved for 64-bit sem_ctime */
#endif /* ! __s390x__ */
	unsigned long	sem_nsems;		/* no. of semaphores in array */
	unsigned long	__unused3;
	unsigned long	__unused4;
};
#endif /* _S390_SEMBUF_H */

View File

@@ -0,0 +1,82 @@
/*
* include/asm-s390/setup.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
*/
#ifndef _ASM_S390_SETUP_H
#define _ASM_S390_SETUP_H

/* The boot parameter area lives at fixed address 0x10400 (see below). */
#define PARMAREA 0x10400
#define COMMAND_LINE_SIZE 896
/* Default ramdisk load address and size. */
#define RAMDISK_ORIGIN 0x800000
#define RAMDISK_SIZE 0x800000
#define MEMORY_CHUNKS 16 /* max 0x7fff */

#ifndef __ASSEMBLY__
/*
 * C view of the parameter area: the 31-bit and 64-bit kernels use
 * different offsets because the fields are native-word sized.
 */
#ifndef __s390x__
#define IPL_DEVICE (*(unsigned long *) (0x10404))
#define INITRD_START (*(unsigned long *) (0x1040C))
#define INITRD_SIZE (*(unsigned long *) (0x10414))
#else /* __s390x__ */
#define IPL_DEVICE (*(unsigned long *) (0x10400))
#define INITRD_START (*(unsigned long *) (0x10408))
#define INITRD_SIZE (*(unsigned long *) (0x10410))
#endif /* __s390x__ */
#define COMMAND_LINE ((char *) (0x10480))
/*
 * Machine features detected in head.S
 */
extern unsigned long machine_flags;
/* Bit assignments must match the detection code in head.S. */
#define MACHINE_IS_VM (machine_flags & 1)
#define MACHINE_IS_P390 (machine_flags & 4)
#define MACHINE_HAS_MVPG (machine_flags & 16)
#define MACHINE_HAS_DIAG44 (machine_flags & 32)
#define MACHINE_HAS_IDTE (machine_flags & 128)
#ifndef __s390x__
#define MACHINE_HAS_IEEE (machine_flags & 2)
#define MACHINE_HAS_CSP (machine_flags & 8)
#else /* __s390x__ */
/* On 64 bit these features are always present. */
#define MACHINE_HAS_IEEE (1)
#define MACHINE_HAS_CSP (1)
#endif /* __s390x__ */
#define MACHINE_HAS_SCLP (!MACHINE_IS_P390)
/*
 * Console mode. Override with conmode=
 */
extern unsigned int console_mode;
extern unsigned int console_devno;
extern unsigned int console_irq;
#define CONSOLE_IS_UNDEFINED (console_mode == 0)
#define CONSOLE_IS_SCLP (console_mode == 1)
#define CONSOLE_IS_3215 (console_mode == 2)
#define CONSOLE_IS_3270 (console_mode == 3)
#define SET_CONSOLE_SCLP do { console_mode = 1; } while (0)
#define SET_CONSOLE_3215 do { console_mode = 2; } while (0)
#define SET_CONSOLE_3270 do { console_mode = 3; } while (0)
#else
/* Assembler view: plain addresses instead of lvalue casts. */
#ifndef __s390x__
#define IPL_DEVICE 0x10404
#define INITRD_START 0x1040C
#define INITRD_SIZE 0x10414
#else /* __s390x__ */
#define IPL_DEVICE 0x10400
#define INITRD_START 0x10408
#define INITRD_SIZE 0x10410
#endif /* __s390x__ */
#define COMMAND_LINE 0x10480
#endif
#endif

View File

@@ -0,0 +1,139 @@
/* Machine-dependent software floating-point definitions.
S/390 kernel version.
Copyright (C) 1997,1998,1999 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Richard Henderson (rth@cygnus.com),
Jakub Jelinek (jj@ultra.linux.cz),
David S. Miller (davem@redhat.com) and
Peter Maydell (pmaydell@chiark.greenend.org.uk).
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If
not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
#ifndef _SFP_MACHINE_H
#define _SFP_MACHINE_H
#include <linux/config.h>
#define _FP_W_TYPE_SIZE 32
#define _FP_W_TYPE unsigned long
#define _FP_WS_TYPE signed long
#define _FP_I_TYPE long
#define _FP_MUL_MEAT_S(R,X,Y) \
_FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_D(R,X,Y) \
_FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_Q(R,X,Y) \
_FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y)
#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y)
#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
#define _FP_NANSIGN_S 0
#define _FP_NANSIGN_D 0
#define _FP_NANSIGN_Q 0
#define _FP_KEEPNANFRACP 1
/*
* If one NaN is signaling and the other is not,
* we choose that one, otherwise we choose X.
*/
#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
do { \
if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs) \
&& !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)) \
{ \
R##_s = Y##_s; \
_FP_FRAC_COPY_##wc(R,Y); \
} \
else \
{ \
R##_s = X##_s; \
_FP_FRAC_COPY_##wc(R,X); \
} \
R##_c = FP_CLS_NAN; \
} while (0)
/* Some assembly to speed things up. */
/*
 * __FP_FRAC_ADD_3 - 3-word (96-bit) fraction add:
 * (r2,r1,r0) = (x2,x1,x0) + (y2,y1,y0), r0 being the low word.
 * The low and middle words are added with "alr"; "brc 12" branches
 * when no carry was produced, otherwise 1 is added to the next
 * higher word.  gpr 0 is used as scratch for the carry and is
 * therefore in the clobber list.
 */
#define __FP_FRAC_ADD_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) ({ \
	unsigned int __r2 = (x2) + (y2); \
	unsigned int __r1 = (x1); \
	unsigned int __r0 = (x0); \
	__asm__ (" alr %2,%3\n" \
		 " brc 12,0f\n" \
		 " lhi 0,1\n" \
		 " alr %1,0\n" \
		 " brc 12,0f\n" \
		 " alr %0,0\n" \
		 "0:" \
		 : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \
		 : "d" (y0), "i" (1) : "cc", "0" ); \
	__asm__ (" alr %1,%2\n" \
		 " brc 12,0f\n" \
		 " ahi %0,1\n" \
		 "0:" \
		 : "+&d" (__r2), "+&d" (__r1) \
		 : "d" (y1) : "cc" ); \
	(r2) = __r2; \
	(r1) = __r1; \
	(r0) = __r0; \
})
/*
 * __FP_FRAC_SUB_3 - 3-word fraction subtract, mirror image of the
 * add above: "slr" subtracts, "brc 3" branches when no borrow
 * occurred, otherwise the borrow is propagated into the next word.
 */
#define __FP_FRAC_SUB_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) ({ \
	unsigned int __r2 = (x2) - (y2); \
	unsigned int __r1 = (x1); \
	unsigned int __r0 = (x0); \
	__asm__ (" slr %2,%3\n" \
		 " brc 3,0f\n" \
		 " lhi 0,1\n" \
		 " slr %1,0\n" \
		 " brc 3,0f\n" \
		 " slr %0,0\n" \
		 "0:" \
		 : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \
		 : "d" (y0) : "cc", "0" ); \
	__asm__ (" slr %1,%2\n" \
		 " brc 3,0f\n" \
		 " ahi %0,-1\n" \
		 "0:" \
		 : "+&d" (__r2), "+&d" (__r1) \
		 : "d" (y1) : "cc" ); \
	(r2) = __r2; \
	(r1) = __r1; \
	(r0) = __r0; \
})
/* In-place 3-word decrement: (x2,x1,x0) -= (y2,y1,y0). */
#define __FP_FRAC_DEC_3(x2,x1,x0,y2,y1,y0) __FP_FRAC_SUB_3(x2,x1,x0,x2,x1,x0,y2,y1,y0)
/* Obtain the current rounding mode. */
#define FP_ROUNDMODE mode
/* Exception flags. */
#define FP_EX_INVALID 0x800000
#define FP_EX_DIVZERO 0x400000
#define FP_EX_OVERFLOW 0x200000
#define FP_EX_UNDERFLOW 0x100000
#define FP_EX_INEXACT 0x080000
/* We write the results always */
#define FP_INHIBIT_RESULTS 0
#endif

View File

@@ -0,0 +1,48 @@
#ifndef _S390_SHMBUF_H
#define _S390_SHMBUF_H
/*
* The shmid64_ds structure for S/390 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 64-bit time_t to solve y2038 problem (for !__s390x__)
* - 2 miscellaneous 32-bit values
*/
/*
 * Kernel/user ABI structure: the field layout (including the
 * conditional padding) must not change.
 */
struct shmid64_ds {
	struct ipc64_perm shm_perm;	/* operation perms */
	size_t shm_segsz;		/* size of segment (bytes) */
	__kernel_time_t shm_atime;	/* last attach time */
#ifndef __s390x__
	unsigned long __unused1;	/* pad for 64-bit time_t (31 bit only) */
#endif /* ! __s390x__ */
	__kernel_time_t shm_dtime;	/* last detach time */
#ifndef __s390x__
	unsigned long __unused2;	/* pad for 64-bit time_t (31 bit only) */
#endif /* ! __s390x__ */
	__kernel_time_t shm_ctime;	/* last change time */
#ifndef __s390x__
	unsigned long __unused3;	/* pad for 64-bit time_t (31 bit only) */
#endif /* ! __s390x__ */
	__kernel_pid_t shm_cpid;	/* pid of creator */
	__kernel_pid_t shm_lpid;	/* pid of last operator */
	unsigned long shm_nattch;	/* no. of current attaches */
	unsigned long __unused4;	/* reserved for future use */
	unsigned long __unused5;	/* reserved for future use */
};
/* System-wide shared memory limits as reported to user space. */
struct shminfo64 {
	unsigned long shmmax;		/* max segment size */
	unsigned long shmmin;		/* min segment size */
	unsigned long shmmni;		/* max number of segments */
	unsigned long shmseg;		/* max segments per process */
	unsigned long shmall;		/* max total shared memory (pages) */
	unsigned long __unused1;	/* reserved for future use */
	unsigned long __unused2;	/* reserved for future use */
	unsigned long __unused3;	/* reserved for future use */
	unsigned long __unused4;	/* reserved for future use */
};
#endif /* _S390_SHMBUF_H */

View File

@@ -0,0 +1,13 @@
/*
* include/asm-s390/shmparam.h
*
* S390 version
*
* Derived from "include/asm-i386/shmparam.h"
*/
#ifndef _ASM_S390_SHMPARAM_H
#define _ASM_S390_SHMPARAM_H
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
#endif /* _ASM_S390_SHMPARAM_H */

View File

@@ -0,0 +1,69 @@
/*
* include/asm-s390/sigcontext.h
*
* S390 version
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
*/
#ifndef _ASM_S390_SIGCONTEXT_H
#define _ASM_S390_SIGCONTEXT_H
#define __NUM_GPRS 16
#define __NUM_FPRS 16
#define __NUM_ACRS 16
#ifndef __s390x__
/* Has to be at least _NSIG_WORDS from asm/signal.h */
#define _SIGCONTEXT_NSIG 64
#define _SIGCONTEXT_NSIG_BPW 32
/* Size of stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE 96
#else /* __s390x__ */
/* Has to be at least _NSIG_WORDS from asm/signal.h */
#define _SIGCONTEXT_NSIG 64
#define _SIGCONTEXT_NSIG_BPW 64
/* Size of stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE 160
#endif /* __s390x__ */
#define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
#define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
/* Program status word as saved in a signal context (8-byte aligned). */
typedef struct
{
	unsigned long mask;	/* PSW mask */
	unsigned long addr;	/* PSW instruction address */
} __attribute__ ((aligned(8))) _psw_t;

/* Integer register state saved for signal delivery. */
typedef struct
{
	_psw_t psw;
	unsigned long gprs[__NUM_GPRS];		/* general purpose registers */
	unsigned int acrs[__NUM_ACRS];		/* access registers */
} _s390_regs_common;

/* Floating-point state saved for signal delivery. */
typedef struct
{
	unsigned int fpc;			/* fp control register */
	double fprs[__NUM_FPRS];		/* fp registers */
} _s390_fp_regs;

/* Complete register set referenced from struct sigcontext. */
typedef struct
{
	_s390_regs_common regs;
	_s390_fp_regs fpregs;
} _sigregs;

/*
 * User-visible signal context: blocked-signal mask at delivery time
 * plus a pointer to the saved registers (which live on the signal
 * stack frame).  ABI structure - layout must not change.
 */
struct sigcontext
{
	unsigned long oldmask[_SIGCONTEXT_NSIG_WORDS];
	_sigregs *sregs;
};
#endif

View File

@@ -0,0 +1,24 @@
/*
* include/asm-s390/siginfo.h
*
* S390 version
*
* Derived from "include/asm-i386/siginfo.h"
*/
#ifndef _S390_SIGINFO_H
#define _S390_SIGINFO_H

/* 64 bit uses a larger siginfo preamble than the asm-generic default. */
#ifdef __s390x__
#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
#endif

/* sigevent padding differs between 31 and 64 bit to keep the size fixed. */
#ifdef CONFIG_ARCH_S390X
#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 4)
#else
#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 3)
#endif

#include <asm-generic/siginfo.h>
#endif

View File

@@ -0,0 +1,197 @@
/*
* include/asm-s390/signal.h
*
* S390 version
*
* Derived from "include/asm-i386/signal.h"
*/
#ifndef _ASMS390_SIGNAL_H
#define _ASMS390_SIGNAL_H
#include <linux/types.h>
#include <linux/time.h>
/* Avoid too many header ordering problems. */
struct siginfo;
struct pt_regs;
#ifdef __KERNEL__
/* Most things should be clean enough to redefine this at will, if care
is taken to make libc match. */
#include <asm/sigcontext.h>
#define _NSIG _SIGCONTEXT_NSIG
#define _NSIG_BPW _SIGCONTEXT_NSIG_BPW
#define _NSIG_WORDS _SIGCONTEXT_NSIG_WORDS
typedef unsigned long old_sigset_t; /* at least 32 bits */
typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
#else
/* Here we must cater to libcs that poke about in kernel headers. */
#define NSIG 32
typedef unsigned long sigset_t;
#endif /* __KERNEL__ */
#define SIGHUP 1
#define SIGINT 2
#define SIGQUIT 3
#define SIGILL 4
#define SIGTRAP 5
#define SIGABRT 6
#define SIGIOT 6
#define SIGBUS 7
#define SIGFPE 8
#define SIGKILL 9
#define SIGUSR1 10
#define SIGSEGV 11
#define SIGUSR2 12
#define SIGPIPE 13
#define SIGALRM 14
#define SIGTERM 15
#define SIGSTKFLT 16
#define SIGCHLD 17
#define SIGCONT 18
#define SIGSTOP 19
#define SIGTSTP 20
#define SIGTTIN 21
#define SIGTTOU 22
#define SIGURG 23
#define SIGXCPU 24
#define SIGXFSZ 25
#define SIGVTALRM 26
#define SIGPROF 27
#define SIGWINCH 28
#define SIGIO 29
#define SIGPOLL SIGIO
/*
#define SIGLOST 29
*/
#define SIGPWR 30
#define SIGSYS 31
#define SIGUNUSED 31
/* These should not be considered constants from userland. */
#define SIGRTMIN 32
#define SIGRTMAX _NSIG
/*
* SA_FLAGS values:
*
* SA_ONSTACK indicates that a registered stack_t will be used.
* SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
* SA_RESTART flag to get restarting signals (which were the default long ago)
* SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
* SA_RESETHAND clears the handler when the signal is delivered.
* SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
* SA_NODEFER prevents the current signal from being masked in the handler.
*
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
* Unix names RESETHAND and NODEFER respectively.
*/
#define SA_NOCLDSTOP 0x00000001
#define SA_NOCLDWAIT 0x00000002
#define SA_SIGINFO 0x00000004
#define SA_ONSTACK 0x08000000
#define SA_RESTART 0x10000000
#define SA_NODEFER 0x40000000
#define SA_RESETHAND 0x80000000
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */
#define SA_RESTORER 0x04000000
/*
* sigaltstack controls
*/
#define SS_ONSTACK 1
#define SS_DISABLE 2
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
#ifdef __KERNEL__
/*
* These values of sa_flags are used only by the kernel as part of the
* irq handling routines.
*
* SA_INTERRUPT is also used by the irq handling routines.
* SA_SHIRQ is for shared interrupt support on PCI and EISA.
*/
#define SA_PROBE SA_ONESHOT
#define SA_SAMPLE_RANDOM SA_RESTART
#define SA_SHIRQ 0x04000000
#endif
#define SIG_BLOCK 0 /* for blocking signals */
#define SIG_UNBLOCK 1 /* for unblocking signals */
#define SIG_SETMASK 2 /* for setting the signal mask */
/* Type of a signal handler. */
typedef void (*__sighandler_t)(int);
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
#ifdef __KERNEL__
/* Kernel view of a handler registered via the old sigaction syscall. */
struct old_sigaction {
	__sighandler_t sa_handler;	/* handler, SIG_DFL or SIG_IGN */
	old_sigset_t sa_mask;		/* signals blocked during handler */
	unsigned long sa_flags;		/* SA_* flags */
	void (*sa_restorer)(void);	/* userspace sigreturn trampoline */
};

/* Kernel view of a handler registered via rt_sigaction. */
struct sigaction {
	__sighandler_t sa_handler;	/* handler, SIG_DFL or SIG_IGN */
	unsigned long sa_flags;		/* SA_* flags */
	void (*sa_restorer)(void);	/* userspace sigreturn trampoline */
	sigset_t sa_mask;		/* mask last for extensibility */
};

/* Per-signal kernel bookkeeping; currently just the sigaction itself. */
struct k_sigaction {
	struct sigaction sa;
};
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#else
/* Here we must cater to libcs that poke about in kernel headers. */
/*
 * Userspace (libc-compatible) view of struct sigaction.  The field
 * order differs between 31 and 64 bit - this matches the respective
 * glibc layouts and must not change.
 */
struct sigaction {
	union {
		__sighandler_t _sa_handler;
		void (*_sa_sigaction)(int, struct siginfo *, void *);
	} _u;
#ifndef __s390x__ /* lovely */
	sigset_t sa_mask;
	unsigned long sa_flags;
	void (*sa_restorer)(void);
#else /* __s390x__ */
	unsigned long sa_flags;
	void (*sa_restorer)(void);
	sigset_t sa_mask;
#endif /* __s390x__ */
};
#define sa_handler _u._sa_handler
#define sa_sigaction _u._sa_sigaction
#endif /* __KERNEL__ */
/* Alternate signal stack descriptor, as used by sigaltstack(2). */
typedef struct sigaltstack {
	void *ss_sp;	/* stack base address */
	int ss_flags;	/* SS_ONSTACK / SS_DISABLE */
	size_t ss_size;	/* stack size in bytes */
} stack_t;
#endif

View File

@@ -0,0 +1,131 @@
/*
* include/asm-s390/sigp.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
* Heiko Carstens (heiko.carstens@de.ibm.com)
*
* sigp.h by D.J. Barrow (c) IBM 1999
* contains routines / structures for signalling other S/390 processors in an
* SMP configuration.
*/
#ifndef __SIGP__
#define __SIGP__
#include <asm/ptrace.h>
#include <asm/atomic.h>
/* get real cpu address from logical cpu number */
extern volatile int __cpu_logical_map[];
/*
 * SIGP order codes; the enumerator values are the order numbers
 * passed to the signal-processor instruction.
 */
typedef enum
{
	sigp_unassigned=0x0,
	sigp_sense,
	sigp_external_call,
	sigp_emergency_signal,
	sigp_start,
	sigp_stop,
	sigp_restart,
	sigp_unassigned1,
	sigp_unassigned2,
	sigp_stop_and_store_status,
	sigp_unassigned3,
	sigp_initial_cpu_reset,
	sigp_cpu_reset,
	sigp_set_prefix,
	sigp_store_status_at_address,
	sigp_store_extended_status_at_address
} sigp_order_code;

/* Status word stored by the addressed cpu when status is returned. */
typedef __u32 sigp_status_word;

/* Condition codes returned by the sigp instruction (0..3). */
typedef enum
{
	sigp_order_code_accepted=0,
	sigp_status_stored,
	sigp_busy,
	sigp_not_operational
} sigp_ccode;

/*
 * Definitions for the external call
 */
/* 'Bit' signals, asynchronous */
typedef enum
{
	ec_schedule=0,
	ec_call_function,
	ec_bit_last
} ec_bit_sig;
/*
* Signal processor
*/
/*
 * signal_processor - send a SIGP order (with parameter 0) to a cpu.
 * @cpu_addr: logical cpu number, translated via __cpu_logical_map
 * @order_code: the SIGP order to issue (see sigp_order_code)
 *
 * Returns the condition code of the sigp instruction.
 */
extern __inline__ sigp_ccode
signal_processor(__u16 cpu_addr, sigp_order_code order_code)
{
	sigp_ccode ccode;

	__asm__ __volatile__(
		" sr 1,1\n" /* parameter=0 in gpr 1 */
		" sigp 1,%1,0(%2)\n"	/* issue order to real cpu address */
		" ipm %0\n"		/* condition code into bits 2-3 of %0 */
		" srl %0,28\n"		/* shift cc down to value 0..3 */
		: "=d" (ccode)
		: "d" (__cpu_logical_map[cpu_addr]), "a" (order_code)
		: "cc" , "memory", "1" );
	return ccode;
}
/*
* Signal processor with parameter
*/
/*
 * signal_processor_p - send a SIGP order with a parameter to a cpu.
 * @parameter: order parameter, passed in gpr 1
 * @cpu_addr: logical cpu number, translated via __cpu_logical_map
 * @order_code: the SIGP order to issue
 *
 * Returns the condition code of the sigp instruction.
 */
extern __inline__ sigp_ccode
signal_processor_p(__u32 parameter, __u16 cpu_addr,
		   sigp_order_code order_code)
{
	sigp_ccode ccode;

	__asm__ __volatile__(
		" lr 1,%1\n" /* parameter in gpr 1 */
		" sigp 1,%2,0(%3)\n"	/* issue order to real cpu address */
		" ipm %0\n"		/* condition code into bits 2-3 of %0 */
		" srl %0,28\n"		/* shift cc down to value 0..3 */
		: "=d" (ccode)
		: "d" (parameter), "d" (__cpu_logical_map[cpu_addr]),
		  "a" (order_code)
		: "cc" , "memory", "1" );
	return ccode;
}
/*
* Signal processor with parameter and return status
*/
/*
 * signal_processor_ps - send a SIGP order with a parameter and fetch
 * the status word.
 * @statusptr: receives the status word (gpr 2 after the sigp; only
 *             meaningful when the cc indicates status stored)
 * @parameter: order parameter, passed in gpr 3
 * @cpu_addr: logical cpu number, translated via __cpu_logical_map
 * @order_code: the SIGP order to issue
 *
 * Returns the condition code of the sigp instruction.
 */
extern __inline__ sigp_ccode
signal_processor_ps(__u32 *statusptr, __u32 parameter,
		    __u16 cpu_addr, sigp_order_code order_code)
{
	sigp_ccode ccode;

	__asm__ __volatile__(
		" sr 2,2\n" /* clear status */
		" lr 3,%2\n" /* parameter in gpr 3 */
		" sigp 2,%3,0(%4)\n"	/* status (if any) lands in gpr 2 */
		" st 2,%1\n"		/* store it unconditionally */
		" ipm %0\n"		/* condition code into bits 2-3 of %0 */
		" srl %0,28\n"		/* shift cc down to value 0..3 */
		: "=d" (ccode), "=m" (*statusptr)
		: "d" (parameter), "d" (__cpu_logical_map[cpu_addr]),
		  "a" (order_code)
		: "cc" , "memory", "2" , "3"
		);
	return ccode;
}
#endif /* __SIGP__ */

View File

@@ -0,0 +1,83 @@
/*
* include/asm-s390/smp.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
* Heiko Carstens (heiko.carstens@de.ibm.com)
*/
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
#include <linux/config.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>
#if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__)
#include <asm/lowcore.h>
/*
s390 specific smp.c headers
*/
/*
 * Result record for a SIGP request.
 * NOTE(review): "intresting" is a historical typo for "interesting";
 * it is kept because the field name is part of the interface used by
 * smp.c - confirm before renaming.
 */
typedef struct
{
	int intresting;		/* flag: entry worth looking at */
	sigp_ccode ccode;	/* condition code from the sigp */
	__u32 status;		/* status word, if one was stored */
	__u16 cpu;		/* cpu address the order was sent to */
} sigp_info;
extern int smp_call_function_on(void (*func) (void *info), void *info,
int nonatomic, int wait, int cpu);
#define NO_PROC_ID 0xFF /* No processor magic marker */
/*
* This magic constant controls our willingness to transfer
* a process across CPUs. Such a transfer incurs misses on the L1
* cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
* gut feeling is this will vary by board in value. For a board
* with separate L2 cache it probably depends also on the RSS, and
* for a board with shared L2 cache it ought to decay fast as other
* processes are run.
*/
#define PROC_CHANGE_PENALTY 20 /* Schedule penalty */
#define smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
extern int smp_get_cpu(cpumask_t cpu_map);
extern void smp_put_cpu(int cpu);
/*
 * hard_smp_processor_id - physical cpu address of the executing cpu,
 * obtained with the "store cpu address" (stap) instruction.
 */
extern __inline__ __u16 hard_smp_processor_id(void)
{
	__u16 cpu_address;

	__asm__ ("stap %0\n" : "=m" (cpu_address));
	return cpu_address;
}
#define cpu_logical_map(cpu) (cpu)
extern int __cpu_disable (void);
extern void __cpu_die (unsigned int cpu);
extern void cpu_die (void) __attribute__ ((noreturn));
extern int __cpu_up (unsigned int cpu);
#endif
#ifndef CONFIG_SMP
/*
 * Uniprocessor stub for smp_call_function_on: with only one cpu the
 * target can only be ourselves, so simply invoke the function here.
 * The nonatomic/wait/cpu arguments are irrelevant on UP; always
 * returns 0 (success).
 */
static inline int
smp_call_function_on(void (*func) (void *info), void *info,
                     int nonatomic, int wait, int cpu)
{
	(*func)(info);
	return 0;
}
#define smp_get_cpu(cpu) ({ 0; })
#define smp_put_cpu(cpu) ({ 0; })
#endif
#endif

View File

@@ -0,0 +1,58 @@
/*
* include/asm-s390/socket.h
*
* S390 version
*
* Derived from "include/asm-i386/socket.h"
*/
#ifndef _ASM_SOCKET_H
#define _ASM_SOCKET_H
#include <asm/sockios.h>
/* For setsockopt(2) */
#define SOL_SOCKET 1
#define SO_DEBUG 1
#define SO_REUSEADDR 2
#define SO_TYPE 3
#define SO_ERROR 4
#define SO_DONTROUTE 5
#define SO_BROADCAST 6
#define SO_SNDBUF 7
#define SO_RCVBUF 8
#define SO_KEEPALIVE 9
#define SO_OOBINLINE 10
#define SO_NO_CHECK 11
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
/* To add :#define SO_REUSEPORT 15 */
#define SO_PASSCRED 16
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
#define SO_SNDLOWAT 19
#define SO_RCVTIMEO 20
#define SO_SNDTIMEO 21
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 22
#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
#define SO_SECURITY_ENCRYPTION_NETWORK 24
#define SO_BINDTODEVICE 25
/* Socket filtering */
#define SO_ATTACH_FILTER 26
#define SO_DETACH_FILTER 27
#define SO_PEERNAME 28
#define SO_TIMESTAMP 29
#define SCM_TIMESTAMP SO_TIMESTAMP
#define SO_ACCEPTCONN 30
#define SO_PEERSEC 31
#endif /* _ASM_SOCKET_H */

View File

@@ -0,0 +1,20 @@
/*
* include/asm-s390/sockios.h
*
* S390 version
*
* Derived from "include/asm-i386/sockios.h"
*/
#ifndef __ARCH_S390_SOCKIOS__
#define __ARCH_S390_SOCKIOS__
/* Socket-level I/O control calls. */
#define FIOSETOWN 0x8901
#define SIOCSPGRP 0x8902
#define FIOGETOWN 0x8903
#define SIOCGPGRP 0x8904
#define SIOCATMARK 0x8905
#define SIOCGSTAMP 0x8906 /* Get stamp */
#endif

View File

@@ -0,0 +1,233 @@
/*
* include/asm-s390/spinlock.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/spinlock.h"
*/
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __s390x__
/*
* Grmph, take care of %&#! user space programs that include
* asm/spinlock.h. The diagnose is only available in kernel
* context.
*/
#ifdef __KERNEL__
#include <asm/lowcore.h>
#define __DIAG44_INSN "ex"
#define __DIAG44_OPERAND __LC_DIAG44_OPCODE
#else
#define __DIAG44_INSN "#"
#define __DIAG44_OPERAND 0
#endif
#endif /* __s390x__ */
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
*
* We make no fairness assumptions. They have a cost.
*/
/*
 * Spinlock word: 0 = free, non-zero = held.  4-byte alignment is
 * required by the compare-and-swap instruction used to take it.
 */
typedef struct {
	volatile unsigned int lock;
} __attribute__ ((aligned (4))) spinlock_t;

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#define spin_lock_init(lp) do { (lp)->lock = 0; } while(0)
/* Busy-wait (without acquiring) until the lock is observed free. */
#define spin_unlock_wait(lp) do { barrier(); } while(((volatile spinlock_t *)(lp))->lock)
#define spin_is_locked(x) ((x)->lock != 0)
/* Flags are not needed by the s390 implementation. */
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
/*
 * _raw_spin_lock - spin until the lock word can be changed from 0 to
 * a non-zero value (an instruction address produced by bras/basr).
 * While contended, diag 0,0,68 (31 bit) resp. diag 44 (64 bit, via
 * __DIAG44_INSN) yields the virtual cpu so the holder can run.
 */
extern inline void _raw_spin_lock(spinlock_t *lp)
{
#ifndef __s390x__
	unsigned int reg1, reg2;
	__asm__ __volatile__("    bras  %0,1f\n"
			   "0:  diag  0,0,68\n"	/* yield to hypervisor */
			   "1:  slr   %1,%1\n"	/* expected old value: 0 */
			   "    cs    %1,%0,0(%3)\n"	/* try to grab lock */
			   "    jl    0b\n"	/* taken: yield and retry */
			   : "=&d" (reg1), "=&d" (reg2), "=m" (lp->lock)
			   : "a" (&lp->lock), "m" (lp->lock)
			   : "cc", "memory" );
#else /* __s390x__ */
	unsigned long reg1, reg2;
	__asm__ __volatile__("    bras  %1,1f\n"
			   "0:  " __DIAG44_INSN " 0,%4\n"	/* yield */
			   "1:  slr   %0,%0\n"	/* expected old value: 0 */
			   "    cs    %0,%1,0(%3)\n"	/* try to grab lock */
			   "    jl    0b\n"	/* taken: yield and retry */
			   : "=&d" (reg1), "=&d" (reg2), "=m" (lp->lock)
			   : "a" (&lp->lock), "i" (__DIAG44_OPERAND),
			     "m" (lp->lock) : "cc", "memory" );
#endif /* __s390x__ */
}
/*
 * _raw_spin_trylock - single compare-and-swap attempt to take the
 * lock.  Returns non-zero on success, 0 if the lock was held.
 */
extern inline int _raw_spin_trylock(spinlock_t *lp)
{
	unsigned long reg;
	unsigned int result;

	__asm__ __volatile__("    basr  %1,0\n"	/* non-zero "locked" value */
			   "0:  cs    %0,%1,0(%3)"	/* swap in if lock == 0 */
			   : "=d" (result), "=&d" (reg), "=m" (lp->lock)
			   : "a" (&lp->lock), "m" (lp->lock), "0" (0)
			   : "cc", "memory" );
	return !result;	/* old value 0 means we got it */
}
/*
 * _raw_spin_unlock - release the lock by storing 0.  Uses cs (rather
 * than a plain store) so the store is serializing.
 */
extern inline void _raw_spin_unlock(spinlock_t *lp)
{
	unsigned int old;

	__asm__ __volatile__("cs %0,%3,0(%4)"
			   : "=d" (old), "=m" (lp->lock)
			   : "0" (lp->lock), "d" (0), "a" (lp)
			   : "cc", "memory" );
}
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
*
* NOTE! it is quite common to have readers in interrupts
* but no interrupt writers. For those circumstances we
* can "mix" irq-safe locks - any writer needs to get a
* irq-safe write-lock, but readers can get non-irqsafe
* read-locks.
*/
/*
 * Reader/writer lock word: the most significant bit marks a writer,
 * the remaining bits count readers.  owner_pc is unused bookkeeping
 * space kept for layout compatibility.
 */
typedef struct {
	volatile unsigned long lock;
	volatile unsigned long owner_pc;
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
#define rwlock_is_locked(x) ((x)->lock != 0)
/*
 * _raw_read_lock: cs/csg loop that clears the write bit from the
 * observed value and swaps in "observed + 1" (one more reader).
 * While a writer holds the lock the cs fails and diag 68/44 yields
 * the virtual cpu.  gprs 2 and 3 are used as scratch.
 */
#ifndef __s390x__
#define _raw_read_lock(rw)   \
	asm volatile("   l     2,0(%1)\n"   \
		     "   j     1f\n"     \
		     "0: diag  0,0,68\n" \
		     "1: la    2,0(2)\n"     /* clear high (=write) bit */ \
		     "   la    3,1(2)\n"     /* one more reader */ \
		     "   cs    2,3,0(%1)\n"  /* try to write new value */ \
		     "   jl    0b"       \
		     : "=m" ((rw)->lock) : "a" (&(rw)->lock), \
		       "m" ((rw)->lock) : "2", "3", "cc", "memory" )
#else /* __s390x__ */
#define _raw_read_lock(rw)   \
	asm volatile("   lg    2,0(%1)\n"   \
		     "   j     1f\n"     \
		     "0: " __DIAG44_INSN " 0,%2\n" \
		     "1: nihh  2,0x7fff\n" /* clear high (=write) bit */ \
		     "   la    3,1(2)\n"   /* one more reader */ \
		     "   csg   2,3,0(%1)\n" /* try to write new value */ \
		     "   jl    0b"       \
		     : "=m" ((rw)->lock) \
		     : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \
		       "m" ((rw)->lock) : "2", "3", "cc", "memory" )
#endif /* __s390x__ */
/*
 * _raw_read_unlock: cs/csg loop that decrements the reader count
 * (ahi -1 resp. bctgr with gpr 0 == decrement by one).
 */
#ifndef __s390x__
#define _raw_read_unlock(rw) \
	asm volatile("   l     2,0(%1)\n"   \
		     "   j     1f\n"     \
		     "0: diag  0,0,68\n" \
		     "1: lr    3,2\n"    \
		     "   ahi   3,-1\n"    /* one less reader */ \
		     "   cs    2,3,0(%1)\n" \
		     "   jl    0b"       \
		     : "=m" ((rw)->lock) : "a" (&(rw)->lock), \
		       "m" ((rw)->lock) : "2", "3", "cc", "memory" )
#else /* __s390x__ */
#define _raw_read_unlock(rw) \
	asm volatile("   lg    2,0(%1)\n"   \
		     "   j     1f\n"     \
		     "0: " __DIAG44_INSN " 0,%2\n" \
		     "1: lgr   3,2\n"    \
		     "   bctgr 3,0\n"    /* one less reader */ \
		     "   csg   2,3,0(%1)\n" \
		     "   jl    0b"       \
		     : "=m" ((rw)->lock) \
		     : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \
		       "m" ((rw)->lock) : "2", "3", "cc", "memory" )
#endif /* __s390x__ */
/*
 * _raw_write_lock: spin until the whole lock word can be changed
 * from 0 (no readers, no writer) to the write-bit value 0x80..0.
 */
#ifndef __s390x__
#define _raw_write_lock(rw) \
	asm volatile("   lhi   3,1\n"    \
		     "   sll   3,31\n"    /* new lock value = 0x80000000 */ \
		     "   j     1f\n"     \
		     "0: diag  0,0,68\n" \
		     "1: slr   2,2\n"     /* old lock value must be 0 */ \
		     "   cs    2,3,0(%1)\n" \
		     "   jl    0b"       \
		     : "=m" ((rw)->lock) : "a" (&(rw)->lock), \
		       "m" ((rw)->lock) : "2", "3", "cc", "memory" )
#else /* __s390x__ */
#define _raw_write_lock(rw) \
	asm volatile("   llihh 3,0x8000\n" /* new lock value = 0x80...0 */ \
		     "   j     1f\n"     \
		     "0: " __DIAG44_INSN " 0,%2\n" \
		     "1: slgr  2,2\n"     /* old lock value must be 0 */ \
		     "   csg   2,3,0(%1)\n" \
		     "   jl    0b"       \
		     : "=m" ((rw)->lock) \
		     : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \
		       "m" ((rw)->lock) : "2", "3", "cc", "memory" )
#endif /* __s390x__ */
/*
 * _raw_write_unlock: swap the write-bit value back to 0; the cs/csg
 * also serializes the release.
 */
#ifndef __s390x__
#define _raw_write_unlock(rw) \
	asm volatile("   slr   3,3\n"     /* new lock value = 0 */ \
		     "   j     1f\n"     \
		     "0: diag  0,0,68\n" \
		     "1: lhi   2,1\n"    \
		     "   sll   2,31\n"    /* old lock value must be 0x80000000 */ \
		     "   cs    2,3,0(%1)\n" \
		     "   jl    0b"       \
		     : "=m" ((rw)->lock) : "a" (&(rw)->lock), \
		       "m" ((rw)->lock) : "2", "3", "cc", "memory" )
#else /* __s390x__ */
#define _raw_write_unlock(rw) \
	asm volatile("   slgr  3,3\n"     /* new lock value = 0 */ \
		     "   j     1f\n"     \
		     "0: " __DIAG44_INSN " 0,%2\n" \
		     "1: llihh 2,0x8000\n" /* old lock value must be 0x8..0 */\
		     "   csg   2,3,0(%1)\n" \
		     "   jl    0b"       \
		     : "=m" ((rw)->lock) \
		     : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \
		       "m" ((rw)->lock) : "2", "3", "cc", "memory" )
#endif /* __s390x__ */
/*
 * _raw_write_trylock - single cs/csg attempt to swap the lock word
 * from 0 to the write-bit value.  Returns non-zero on success.
 */
extern inline int _raw_write_trylock(rwlock_t *rw)
{
	unsigned long result, reg;

	__asm__ __volatile__(
#ifndef __s390x__
			   "    lhi  %1,1\n"
			   "    sll  %1,31\n"	/* write bit 0x80000000 */
			   "    cs   %0,%1,0(%3)"
#else /* __s390x__ */
			   "    llihh %1,0x8000\n"	/* write bit 0x80..0 */
			   "0:  csg %0,%1,0(%3)\n"
#endif /* __s390x__ */
			   : "=d" (result), "=&d" (reg), "=m" (rw->lock)
			   : "a" (&rw->lock), "m" (rw->lock), "0" (0UL)
			   : "cc", "memory" );
	return result == 0;	/* lock word was 0: we now own it */
}
#endif /* __ASM_SPINLOCK_H */

View File

@@ -0,0 +1,105 @@
/*
* include/asm-s390/stat.h
*
* S390 version
*
* Derived from "include/asm-i386/stat.h"
*/
#ifndef _S390_STAT_H
#define _S390_STAT_H
#ifndef __s390x__
/* Legacy stat layout for the oldest stat syscall (user ABI). */
struct __old_kernel_stat {
	unsigned short st_dev;
	unsigned short st_ino;
	unsigned short st_mode;
	unsigned short st_nlink;
	unsigned short st_uid;
	unsigned short st_gid;
	unsigned short st_rdev;
	unsigned long  st_size;
	unsigned long  st_atime;
	unsigned long  st_mtime;
	unsigned long  st_ctime;
};
/* 31-bit stat layout (user ABI - do not change field order/padding). */
struct stat {
	unsigned short st_dev;
	unsigned short __pad1;
	unsigned long  st_ino;
	unsigned short st_mode;
	unsigned short st_nlink;
	unsigned short st_uid;
	unsigned short st_gid;
	unsigned short st_rdev;
	unsigned short __pad2;
	unsigned long  st_size;
	unsigned long  st_blksize;
	unsigned long  st_blocks;
	unsigned long  st_atime;
	unsigned long  st_atime_nsec;
	unsigned long  st_mtime;
	unsigned long  st_mtime_nsec;
	unsigned long  st_ctime;
	unsigned long  st_ctime_nsec;
	unsigned long  __unused4;
	unsigned long  __unused5;
};
/* This matches struct stat64 in glibc2.1, hence the absolutely
* insane amounts of padding around dev_t's.
*/
/* Large-file stat layout, matching glibc 2.1's struct stat64. */
struct stat64 {
	unsigned long long	st_dev;
	unsigned int    __pad1;
#define STAT64_HAS_BROKEN_ST_INO	1
	unsigned long   __st_ino;	/* truncated inode; full value below */
	unsigned int    st_mode;
	unsigned int    st_nlink;
	unsigned long   st_uid;
	unsigned long   st_gid;
	unsigned long long	st_rdev;
	unsigned int    __pad3;
	long long	st_size;
	unsigned long   st_blksize;
	unsigned char   __pad4[4];
	unsigned long   __pad5;     /* future possible st_blocks high bits */
	unsigned long   st_blocks;  /* Number 512-byte blocks allocated. */
	unsigned long   st_atime;
	unsigned long   st_atime_nsec;
	unsigned long   st_mtime;
	unsigned long   st_mtime_nsec;
	unsigned long   st_ctime;
	unsigned long   st_ctime_nsec;  /* will be high 32 bits of ctime someday */
	unsigned long long	st_ino;
};
#else /* __s390x__ */
/* 64-bit stat layout (user ABI - do not change field order/padding). */
struct stat {
	unsigned long	st_dev;
	unsigned long	st_ino;
	unsigned long	st_nlink;
	unsigned int	st_mode;
	unsigned int	st_uid;
	unsigned int	st_gid;
	unsigned int	__pad1;
	unsigned long	st_rdev;
	unsigned long	st_size;
	unsigned long	st_atime;
	unsigned long	st_atime_nsec;
	unsigned long	st_mtime;
	unsigned long	st_mtime_nsec;
	unsigned long	st_ctime;
	unsigned long	st_ctime_nsec;
	unsigned long	st_blksize;
	long		st_blocks;
	unsigned long	__unused[3];
};
#endif /* __s390x__ */
#define STAT_HAVE_NSEC 1
#endif

View File

@@ -0,0 +1,71 @@
/*
* include/asm-s390/statfs.h
*
* S390 version
*
* Derived from "include/asm-i386/statfs.h"
*/
#ifndef _S390_STATFS_H
#define _S390_STATFS_H
#ifndef __s390x__
#include <asm-generic/statfs.h>
#else
#ifndef __KERNEL_STRICT_NAMES
#include <linux/types.h>
typedef __kernel_fsid_t fsid_t;
#endif
/*
* This is ugly -- we're already 64-bit clean, so just duplicate the
* definitions.
*/
/* 64-bit statfs: identical to statfs64 since longs are already 64 bit. */
struct statfs {
	int  f_type;
	int  f_bsize;
	long f_blocks;
	long f_bfree;
	long f_bavail;
	long f_files;
	long f_ffree;
	__kernel_fsid_t f_fsid;
	int  f_namelen;
	int  f_frsize;
	int  f_spare[5];
};

struct statfs64 {
	int  f_type;
	int  f_bsize;
	long f_blocks;
	long f_bfree;
	long f_bavail;
	long f_files;
	long f_ffree;
	__kernel_fsid_t f_fsid;
	int  f_namelen;
	int  f_frsize;
	int  f_spare[5];
};

/* Layout used by 31-bit compat tasks calling statfs64 on a 64-bit kernel. */
struct compat_statfs64 {
	__u32 f_type;
	__u32 f_bsize;
	__u64 f_blocks;
	__u64 f_bfree;
	__u64 f_bavail;
	__u64 f_files;
	__u64 f_ffree;
	__kernel_fsid_t f_fsid;
	__u32 f_namelen;
	__u32 f_frsize;
	__u32 f_spare[5];
};
#endif /* __s390x__ */
#endif

View File

@@ -0,0 +1,138 @@
/*
* include/asm-s390/string.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
#ifndef _S390_STRING_H_
#define _S390_STRING_H_
#ifdef __KERNEL__
#ifndef _LINUX_TYPES_H
#include <linux/types.h>
#endif
#define __HAVE_ARCH_BCOPY /* arch function */
#define __HAVE_ARCH_MEMCHR /* inline & arch function */
#define __HAVE_ARCH_MEMCMP /* arch function */
#define __HAVE_ARCH_MEMCPY /* gcc builtin & arch function */
#define __HAVE_ARCH_MEMSCAN /* inline & arch function */
#define __HAVE_ARCH_MEMSET /* gcc builtin & arch function */
#define __HAVE_ARCH_STRCAT /* inline & arch function */
#define __HAVE_ARCH_STRCMP /* arch function */
#define __HAVE_ARCH_STRCPY /* inline & arch function */
#define __HAVE_ARCH_STRLCAT /* arch function */
#define __HAVE_ARCH_STRLCPY /* arch function */
#define __HAVE_ARCH_STRLEN /* inline & arch function */
#define __HAVE_ARCH_STRNCAT /* arch function */
#define __HAVE_ARCH_STRNCPY /* arch function */
#define __HAVE_ARCH_STRNLEN /* inline & arch function */
#define __HAVE_ARCH_STRRCHR /* arch function */
#define __HAVE_ARCH_STRSTR /* arch function */
/* Prototypes for non-inlined arch strings functions. */
extern int memcmp(const void *, const void *, size_t);
extern void *memcpy(void *, const void *, size_t);
extern void *memset(void *, int, size_t);
extern int strcmp(const char *,const char *);
extern size_t strlcat(char *, const char *, size_t);
extern size_t strlcpy(char *, const char *, size_t);
extern char *strncat(char *, const char *, size_t);
extern char *strncpy(char *, const char *, size_t);
extern char *strrchr(const char *, int);
extern char *strstr(const char *, const char *);
#undef __HAVE_ARCH_MEMMOVE
#undef __HAVE_ARCH_STRCHR
#undef __HAVE_ARCH_STRNCHR
#undef __HAVE_ARCH_STRNCMP
#undef __HAVE_ARCH_STRNICMP
#undef __HAVE_ARCH_STRPBRK
#undef __HAVE_ARCH_STRSEP
#undef __HAVE_ARCH_STRSPN
#if !defined(IN_ARCH_STRING_C)
/*
 * memchr - find the first occurrence of byte c in the n bytes at s.
 * Returns a pointer to the match or NULL.  Uses the SRST (search
 * string) instruction with the search byte in gpr 0; the loop
 * resumes after a cpu-determined partial completion (cc 3).
 */
static inline void *memchr(const void * s, int c, size_t n)
{
	register int r0 asm("0") = (char) c;
	const void *ret = s + n;	/* end address; replaced by hit address */

	asm volatile ("0: srst  %0,%1\n"
		      "   jo    0b\n"	/* cc 3: partial, continue scan */
		      "   jl	1f\n"	/* cc 1: found, %0 is the address */
		      "   la    %0,0\n"	/* not found: return NULL */
		      "1:"
		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
	return (void *) ret;
}
/*
 * memscan - like memchr, but when c does not occur in the n bytes
 * the returned pointer is the end of the area (s + n), never NULL.
 */
static inline void *memscan(void *s, int c, size_t n)
{
	register int r0 asm("0") = (char) c;
	const void *ret = s + n;	/* stays s+n if no match is found */

	asm volatile ("0: srst  %0,%1\n"
		      "   jo    0b\n"	/* cc 3: partial, continue scan */
		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
	return (void *) ret;
}
/*
 * strcat - append the string 'src' (including its terminator) to the
 * end of 'dst'.  SRST with r0 = 0 locates the NUL that terminates
 * dst, then MVST copies src to that address.  Returns dst.
 */
static inline char *strcat(char *dst, const char *src)
{
register int r0 asm("0") = 0; /* NUL: SRST search char and MVST end char */
unsigned long dummy;
char *ret = dst;
asm volatile ("0: srst %0,%1\n" /* find the NUL terminating dst */
" jo 0b\n" /* cc 3: resume interrupted search */
"1: mvst %0,%2\n" /* copy src to the end of dst */
" jo 1b" /* cc 3: resume interrupted move */
: "=&a" (dummy), "+a" (dst), "+a" (src)
: "d" (r0), "0" (0) : "cc", "memory" );
return ret;
}
/*
 * strcpy - copy the NUL-terminated string src to dst with the MVST
 * (move string) instruction; r0 = 0 is the ending character, so the
 * terminator is copied as well.  Returns dst.
 */
static inline char *strcpy(char *dst, const char *src)
{
register int r0 asm("0") = 0; /* MVST stops after copying this character */
char *ret = dst;
asm volatile ("0: mvst %0,%1\n" /* copy up to and including the NUL */
" jo 0b" /* cc 3: resume an interrupted move */
: "+&a" (dst), "+&a" (src) : "d" (r0)
: "cc", "memory" );
return ret;
}
/*
 * strlen - string length via SRST.  r0 doubles as the implicit
 * search character (0) and as the end-address operand, so the scan
 * effectively runs until the terminator is found; SRST then leaves
 * the address of the NUL in r0.
 */
static inline size_t strlen(const char *s)
{
register unsigned long r0 asm("0") = 0;
const char *tmp = s;
asm volatile ("0: srst %0,%1\n" /* find the terminating NUL */
" jo 0b" /* cc 3: resume an interrupted scan */
: "+d" (r0), "+a" (tmp) : : "cc" );
return r0 - (unsigned long) s; /* NUL address minus start = length */
}
/*
 * strnlen - length of s, but at most n.  SRST scans [s, s+n) for the
 * NUL; if found, 'end' becomes its address, otherwise it stays at
 * s + n, so the result is min(strlen(s), n).
 */
static inline size_t strnlen(const char * s, size_t n)
{
register int r0 asm("0") = 0; /* search character: NUL */
const char *tmp = s;
const char *end = s + n;
asm volatile ("0: srst %0,%1\n"
" jo 0b" /* cc 3: resume an interrupted scan */
: "+a" (end), "+a" (tmp) : "d" (r0) : "cc" );
return end - s;
}
#endif /* !IN_ARCH_STRING_C */
#endif /* __KERNEL__ */
#endif /* __S390_STRING_H_ */

View File

@@ -0,0 +1,5 @@
#ifndef __ASM_S390_SUSPEND_H
#define __ASM_S390_SUSPEND_H
#endif

View File

@@ -0,0 +1,451 @@
/*
* include/asm-s390/system.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*
* Derived from "include/asm-i386/system.h"
*/
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H
#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#ifdef __KERNEL__
struct task_struct;
extern struct task_struct *__switch_to(void *, void *);
#ifdef __s390x__
#define __FLAG_SHIFT 56
#else /* ! __s390x__ */
#define __FLAG_SHIFT 24
#endif /* ! __s390x__ */
/*
 * save_fp_regs - store the floating point state into *fpregs.
 * Registers 0/2/4/6 (present on all models) are always saved;
 * the FP control register and the remaining twelve registers are
 * only saved on machines with the IEEE floating point facility.
 * Offsets match s390_fp_regs: fpc at 0, fpr N at 8 + N*8.
 */
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
asm volatile ( /* basic set: fpr 0/2/4/6 */
" std 0,8(%1)\n"
" std 2,24(%1)\n"
" std 4,40(%1)\n"
" std 6,56(%1)"
: "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" );
if (!MACHINE_HAS_IEEE)
return;
asm volatile( /* IEEE machines: fp control reg + remaining fprs */
" stfpc 0(%1)\n"
" std 1,16(%1)\n"
" std 3,32(%1)\n"
" std 5,48(%1)\n"
" std 7,64(%1)\n"
" std 8,72(%1)\n"
" std 9,80(%1)\n"
" std 10,88(%1)\n"
" std 11,96(%1)\n"
" std 12,104(%1)\n"
" std 13,112(%1)\n"
" std 14,120(%1)\n"
" std 15,128(%1)\n"
: "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" );
}
/*
 * restore_fp_regs - reload the floating point state from *fpregs,
 * the mirror image of save_fp_regs() above: fpr 0/2/4/6 always,
 * the FP control register and remaining registers only when the
 * machine has the IEEE floating point facility.
 */
static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
asm volatile ( /* basic set: fpr 0/2/4/6 */
" ld 0,8(%0)\n"
" ld 2,24(%0)\n"
" ld 4,40(%0)\n"
" ld 6,56(%0)"
: : "a" (fpregs), "m" (*fpregs) );
if (!MACHINE_HAS_IEEE)
return;
asm volatile( /* IEEE machines: fp control reg + remaining fprs */
" lfpc 0(%0)\n"
" ld 1,16(%0)\n"
" ld 3,32(%0)\n"
" ld 5,48(%0)\n"
" ld 7,64(%0)\n"
" ld 8,72(%0)\n"
" ld 9,80(%0)\n"
" ld 10,88(%0)\n"
" ld 11,96(%0)\n"
" ld 12,104(%0)\n"
" ld 13,112(%0)\n"
" ld 14,120(%0)\n"
" ld 15,128(%0)\n"
: : "a" (fpregs), "m" (*fpregs) );
}
/* save_access_regs - store access registers 0-15 into the 16-word array at acrs. */
static inline void save_access_regs(unsigned int *acrs)
{
asm volatile ("stam 0,15,0(%0)" : : "a" (acrs) : "memory" );
}
/* restore_access_regs - load access registers 0-15 from the 16-word array at acrs. */
static inline void restore_access_regs(unsigned int *acrs)
{
asm volatile ("lam 0,15,0(%0)" : : "a" (acrs) );
}
#define switch_to(prev,next,last) do { \
if (prev == next) \
break; \
save_fp_regs(&prev->thread.fp_regs); \
restore_fp_regs(&next->thread.fp_regs); \
save_access_regs(&prev->thread.acrs[0]); \
restore_access_regs(&next->thread.acrs[0]); \
prev = __switch_to(prev,next); \
} while (0)
#define prepare_arch_switch(rq, next) do { } while(0)
#define task_running(rq, p) ((rq)->curr == (p))
#define finish_arch_switch(rq, prev) do { \
set_fs(current->thread.mm_segment); \
spin_unlock_irq(&(rq)->lock); \
} while (0)
#define nop() __asm__ __volatile__ ("nop")
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(void *)(ptr),sizeof(*(ptr))))
/*
 * __xchg - atomically exchange 'x' with the value at 'ptr' and
 * return the previous contents.  Only word (and on 64 bit also
 * doubleword) compare-and-swap exists, so the 1- and 2-byte cases
 * operate on the containing aligned word, replacing just the target
 * bits inside a CS retry loop.  Used through the xchg() macro above.
 */
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
unsigned long addr, old;
int shift;
switch (size) {
case 1:
addr = (unsigned long) ptr;
/* big-endian bit offset of the byte within its word */
shift = (3 ^ (addr & 3)) << 3;
addr ^= addr & 3; /* align down: CS needs a word boundary */
asm volatile(
" l %0,0(%4)\n"
"0: lr 0,%0\n"
" nr 0,%3\n" /* keep the other three bytes */
" or 0,%2\n" /* insert the new byte */
" cs %0,0,0(%4)\n"
" jl 0b\n" /* cc 1: word changed underneath us, retry */
: "=&d" (old), "=m" (*(int *) addr)
: "d" (x << shift), "d" (~(255 << shift)), "a" (addr),
"m" (*(int *) addr) : "memory", "cc", "0" );
x = old >> shift;
break;
case 2:
addr = (unsigned long) ptr;
/* bit offset of the halfword within its word */
shift = (2 ^ (addr & 2)) << 3;
addr ^= addr & 2; /* align down: CS needs a word boundary */
asm volatile(
" l %0,0(%4)\n"
"0: lr 0,%0\n"
" nr 0,%3\n" /* keep the other halfword */
" or 0,%2\n" /* insert the new halfword */
" cs %0,0,0(%4)\n"
" jl 0b\n" /* cc 1: word changed underneath us, retry */
: "=&d" (old), "=m" (*(int *) addr)
: "d" (x << shift), "d" (~(65535 << shift)), "a" (addr),
"m" (*(int *) addr) : "memory", "cc", "0" );
x = old >> shift;
break;
case 4:
asm volatile (
" l %0,0(%3)\n"
"0: cs %0,%2,0(%3)\n"
" jl 0b\n" /* cc 1: retry until the swap succeeds */
: "=&d" (old), "=m" (*(int *) ptr)
: "d" (x), "a" (ptr), "m" (*(int *) ptr)
: "memory", "cc" );
x = old;
break;
#ifdef __s390x__
case 8:
asm volatile (
" lg %0,0(%3)\n"
"0: csg %0,%2,0(%3)\n"
" jl 0b\n" /* cc 1: retry until the swap succeeds */
: "=&d" (old), "=m" (*(long *) ptr)
: "d" (x), "a" (ptr), "m" (*(long *) ptr)
: "memory", "cc" );
x = old;
break;
#endif /* __s390x__ */
}
return x;
}
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n)\
((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
(unsigned long)(n),sizeof(*(ptr))))
/*
 * __cmpxchg - atomic compare-and-exchange: if *ptr equals 'old',
 * store 'new'; always return the value that was in *ptr.  Callers
 * use the cmpxchg() macro above and detect success by comparing the
 * return value with 'old'.
 *
 * Only word (and on 64 bit doubleword) compare-and-swap exists, so
 * the 1- and 2-byte cases work on the containing aligned word: the
 * target bits are masked out of the current word, the expected and
 * new values are OR-ed in at the right position, and the CS loop
 * retries when the compare failed merely because bytes outside the
 * target changed.
 *
 * Bug fix: the byte and halfword cases computed the word-aligned
 * address in 'addr' but passed the original, possibly unaligned,
 * 'ptr' to the asm.  CS requires its second operand on a word
 * boundary (specification exception otherwise), and the shifts are
 * relative to the aligned word, so the asm must use 'addr'.
 */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
unsigned long addr, prev, tmp;
int shift;
switch (size) {
case 1:
addr = (unsigned long) ptr;
/* big-endian bit offset of the byte within its word */
shift = (3 ^ (addr & 3)) << 3;
addr ^= addr & 3; /* align down: CS needs a word boundary */
asm volatile(
" l %0,0(%4)\n"
"0: nr %0,%5\n" /* keep only the other three bytes */
" lr %1,%0\n"
" or %0,%2\n" /* expected word = others | old byte */
" or %1,%3\n" /* new word = others | new byte */
" cs %0,%1,0(%4)\n"
" jnl 1f\n" /* cc 0: swap succeeded */
" xr %1,%0\n" /* what differs from our attempt? */
" nr %1,%5\n" /* did the uninvolved bytes change? */
" jnz 0b\n" /* yes: rebuild and retry */
"1:"
: "=&d" (prev), "=&d" (tmp)
: "d" (old << shift), "d" (new << shift), "a" (addr),
"d" (~(255 << shift))
: "memory", "cc" );
return prev >> shift;
case 2:
addr = (unsigned long) ptr;
/* bit offset of the halfword within its word */
shift = (2 ^ (addr & 2)) << 3;
addr ^= addr & 2; /* align down: CS needs a word boundary */
asm volatile(
" l %0,0(%4)\n"
"0: nr %0,%5\n" /* keep only the other halfword */
" lr %1,%0\n"
" or %0,%2\n" /* expected word = other | old halfword */
" or %1,%3\n" /* new word = other | new halfword */
" cs %0,%1,0(%4)\n"
" jnl 1f\n" /* cc 0: swap succeeded */
" xr %1,%0\n"
" nr %1,%5\n" /* did the uninvolved halfword change? */
" jnz 0b\n" /* yes: rebuild and retry */
"1:"
: "=&d" (prev), "=&d" (tmp)
: "d" (old << shift), "d" (new << shift), "a" (addr),
"d" (~(65535 << shift))
: "memory", "cc" );
return prev >> shift;
case 4:
asm volatile (
" cs %0,%2,0(%3)\n"
: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
: "memory", "cc" );
return prev;
#ifdef __s390x__
case 8:
asm volatile (
" csg %0,%2,0(%3)\n"
: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
: "memory", "cc" );
return prev;
#endif /* __s390x__ */
}
return old;
}
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*
* This is very similar to the ppc eieio/sync instruction in that is
* does a checkpoint syncronisation & makes sure that
* all memory ops have completed wrt other CPU's ( see 7-15 POP DJB ).
*/
#define eieio() __asm__ __volatile__ ( "bcr 15,0" : : : "memory" )
# define SYNC_OTHER_CORES(x) eieio()
#define mb() eieio()
#define rmb() eieio()
#define wmb() eieio()
#define read_barrier_depends() do { } while(0)
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
/* interrupt control.. */
#define local_irq_enable() ({ \
unsigned long __dummy; \
__asm__ __volatile__ ( \
"stosm 0(%1),0x03" \
: "=m" (__dummy) : "a" (&__dummy) : "memory" ); \
})
#define local_irq_disable() ({ \
unsigned long __flags; \
__asm__ __volatile__ ( \
"stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \
__flags; \
})
#define local_save_flags(x) \
__asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) )
#define local_irq_restore(x) \
__asm__ __volatile__("ssm 0(%0)" : : "a" (&x), "m" (x) : "memory")
#define irqs_disabled() \
({ \
unsigned long flags; \
local_save_flags(flags); \
!((flags >> __FLAG_SHIFT) & 3); \
})
#ifdef __s390x__
#define __load_psw(psw) \
__asm__ __volatile__("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" );
#define __ctl_load(array, low, high) ({ \
__asm__ __volatile__ ( \
" bras 1,0f\n" \
" lctlg 0,0,0(%0)\n" \
"0: ex %1,0(1)" \
: : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
})
#define __ctl_store(array, low, high) ({ \
__asm__ __volatile__ ( \
" bras 1,0f\n" \
" stctg 0,0,0(%1)\n" \
"0: ex %2,0(1)" \
: "=m" (array) : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
})
#define __ctl_set_bit(cr, bit) ({ \
__u8 __dummy[24]; \
__asm__ __volatile__ ( \
" bras 1,0f\n" /* skip indirect insns */ \
" stctg 0,0,0(%1)\n" \
" lctlg 0,0,0(%1)\n" \
"0: ex %2,0(1)\n" /* execute stctl */ \
" lg 0,0(%1)\n" \
" ogr 0,%3\n" /* set the bit */ \
" stg 0,0(%1)\n" \
"1: ex %2,6(1)" /* execute lctl */ \
: "=m" (__dummy) \
: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
"a" (cr*17), "a" (1L<<(bit)) \
: "cc", "0", "1" ); \
})
/* __ctl_clear_bit - clear one bit in control register 'cr' on this cpu. */
#define __ctl_clear_bit(cr, bit) ({ \
__u8 __dummy[16]; \
__asm__ __volatile__ ( \
" bras 1,0f\n" /* skip indirect insns */ \
" stctg 0,0,0(%1)\n" \
" lctlg 0,0,0(%1)\n" \
"0: ex %2,0(1)\n" /* execute stctg */ \
" lg 0,0(%1)\n" \
" ngr 0,%3\n" /* clear the bit */ \
" stg 0,0(%1)\n" \
"1: ex %2,6(1)" /* execute lctlg */ \
: "=m" (__dummy) \
: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
"a" (cr*17), "a" (~(1L<<(bit))) \
: "cc", "0", "1" ); \
})
#else /* __s390x__ */
#define __load_psw(psw) \
__asm__ __volatile__("lpsw 0(%0)" : : "a" (&psw) : "cc" );
#define __ctl_load(array, low, high) ({ \
__asm__ __volatile__ ( \
" bras 1,0f\n" \
" lctl 0,0,0(%0)\n" \
"0: ex %1,0(1)" \
: : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
})
#define __ctl_store(array, low, high) ({ \
__asm__ __volatile__ ( \
" bras 1,0f\n" \
" stctl 0,0,0(%1)\n" \
"0: ex %2,0(1)" \
: "=m" (array) : "a" (&array), "a" (((low)<<4)+(high)): "1" ); \
})
#define __ctl_set_bit(cr, bit) ({ \
__u8 __dummy[16]; \
__asm__ __volatile__ ( \
" bras 1,0f\n" /* skip indirect insns */ \
" stctl 0,0,0(%1)\n" \
" lctl 0,0,0(%1)\n" \
"0: ex %2,0(1)\n" /* execute stctl */ \
" l 0,0(%1)\n" \
" or 0,%3\n" /* set the bit */ \
" st 0,0(%1)\n" \
"1: ex %2,4(1)" /* execute lctl */ \
: "=m" (__dummy) \
: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
"a" (cr*17), "a" (1<<(bit)) \
: "cc", "0", "1" ); \
})
/* __ctl_clear_bit - clear one bit in control register 'cr' on this cpu. */
#define __ctl_clear_bit(cr, bit) ({ \
__u8 __dummy[16]; \
__asm__ __volatile__ ( \
" bras 1,0f\n" /* skip indirect insns */ \
" stctl 0,0,0(%1)\n" \
" lctl 0,0,0(%1)\n" \
"0: ex %2,0(1)\n" /* execute stctl */ \
" l 0,0(%1)\n" \
" nr 0,%3\n" /* clear the bit */ \
" st 0,0(%1)\n" \
"1: ex %2,4(1)" /* execute lctl */ \
: "=m" (__dummy) \
: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
"a" (cr*17), "a" (~(1<<(bit))) \
: "cc", "0", "1" ); \
})
#endif /* __s390x__ */
/* For spinlocks etc */
#define local_irq_save(x) ((x) = local_irq_disable())
#ifdef CONFIG_SMP
extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
#else
#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
#endif /* CONFIG_SMP */
extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);
#endif /* __KERNEL__ */
#endif

View File

@@ -0,0 +1,39 @@
/*************************************************************************
*
* tape390.h
* enables user programs to display messages on the tape device
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Despina Papadopoulou <despina_p@de.ibm.com>
*
*************************************************************************/
#ifndef _TAPE390_H
#define _TAPE390_H
#define TAPE390_DISPLAY _IOW('d', 1, struct display_struct)
/*
* The TAPE390_DISPLAY ioctl calls the Load Display command
* which transfers 17 bytes of data from the channel to the subsystem:
* - 1 format control byte, and
* - two 8-byte messages
*
* Format control byte:
* 0-2: New Message Overlay
* 3: Alternate Messages
* 4: Blink Message
* 5: Display Low/High Message
* 6: Reserved
* 7: Automatic Load Request
*
*/
/* Payload of the TAPE390_DISPLAY ioctl: 17 bytes sent to the device. */
typedef struct display_struct {
char cntrl; /* format control byte, bit layout described above */
char message1[8]; /* first 8-character display message */
char message2[8]; /* second 8-character display message */
} display_struct;
#endif

View File

@@ -0,0 +1,181 @@
/*
* include/asm-s390/termbits.h
*
* S390 version
*
* Derived from "include/asm-i386/termbits.h"
*/
#ifndef __ARCH_S390_TERMBITS_H__
#define __ARCH_S390_TERMBITS_H__
#include <linux/posix_types.h>
typedef unsigned char cc_t;
typedef unsigned int speed_t;
typedef unsigned int tcflag_t;
#define NCCS 19
struct termios {
tcflag_t c_iflag; /* input mode flags */
tcflag_t c_oflag; /* output mode flags */
tcflag_t c_cflag; /* control mode flags */
tcflag_t c_lflag; /* local mode flags */
cc_t c_line; /* line discipline */
cc_t c_cc[NCCS]; /* control characters */
};
/* c_cc characters */
#define VINTR 0
#define VQUIT 1
#define VERASE 2
#define VKILL 3
#define VEOF 4
#define VTIME 5
#define VMIN 6
#define VSWTC 7
#define VSTART 8
#define VSTOP 9
#define VSUSP 10
#define VEOL 11
#define VREPRINT 12
#define VDISCARD 13
#define VWERASE 14
#define VLNEXT 15
#define VEOL2 16
/* c_iflag bits */
#define IGNBRK 0000001
#define BRKINT 0000002
#define IGNPAR 0000004
#define PARMRK 0000010
#define INPCK 0000020
#define ISTRIP 0000040
#define INLCR 0000100
#define IGNCR 0000200
#define ICRNL 0000400
#define IUCLC 0001000
#define IXON 0002000
#define IXANY 0004000
#define IXOFF 0010000
#define IMAXBEL 0020000
#define IUTF8 0040000
/* c_oflag bits */
#define OPOST 0000001
#define OLCUC 0000002
#define ONLCR 0000004
#define OCRNL 0000010
#define ONOCR 0000020
#define ONLRET 0000040
#define OFILL 0000100
#define OFDEL 0000200
#define NLDLY 0000400
#define NL0 0000000
#define NL1 0000400
#define CRDLY 0003000
#define CR0 0000000
#define CR1 0001000
#define CR2 0002000
#define CR3 0003000
#define TABDLY 0014000
#define TAB0 0000000
#define TAB1 0004000
#define TAB2 0010000
#define TAB3 0014000
#define XTABS 0014000
#define BSDLY 0020000
#define BS0 0000000
#define BS1 0020000
#define VTDLY 0040000
#define VT0 0000000
#define VT1 0040000
#define FFDLY 0100000
#define FF0 0000000
#define FF1 0100000
/* c_cflag bit meaning */
#define CBAUD 0010017
#define B0 0000000 /* hang up */
#define B50 0000001
#define B75 0000002
#define B110 0000003
#define B134 0000004
#define B150 0000005
#define B200 0000006
#define B300 0000007
#define B600 0000010
#define B1200 0000011
#define B1800 0000012
#define B2400 0000013
#define B4800 0000014
#define B9600 0000015
#define B19200 0000016
#define B38400 0000017
#define EXTA B19200
#define EXTB B38400
#define CSIZE 0000060
#define CS5 0000000
#define CS6 0000020
#define CS7 0000040
#define CS8 0000060
#define CSTOPB 0000100
#define CREAD 0000200
#define PARENB 0000400
#define PARODD 0001000
#define HUPCL 0002000
#define CLOCAL 0004000
#define CBAUDEX 0010000
#define B57600 0010001
#define B115200 0010002
#define B230400 0010003
#define B460800 0010004
#define B500000 0010005
#define B576000 0010006
#define B921600 0010007
#define B1000000 0010010
#define B1152000 0010011
#define B1500000 0010012
#define B2000000 0010013
#define B2500000 0010014
#define B3000000 0010015
#define B3500000 0010016
#define B4000000 0010017
#define CIBAUD 002003600000 /* input baud rate (not used) */
#define CMSPAR 010000000000 /* mark or space (stick) parity */
#define CRTSCTS 020000000000 /* flow control */
/* c_lflag bits */
#define ISIG 0000001
#define ICANON 0000002
#define XCASE 0000004
#define ECHO 0000010
#define ECHOE 0000020
#define ECHOK 0000040
#define ECHONL 0000100
#define NOFLSH 0000200
#define TOSTOP 0000400
#define ECHOCTL 0001000
#define ECHOPRT 0002000
#define ECHOKE 0004000
#define FLUSHO 0010000
#define PENDIN 0040000
#define IEXTEN 0100000
/* tcflow() and TCXONC use these */
#define TCOOFF 0
#define TCOON 1
#define TCIOFF 2
#define TCION 3
/* tcflush() and TCFLSH use these */
#define TCIFLUSH 0
#define TCOFLUSH 1
#define TCIOFLUSH 2
/* tcsetattr uses these */
#define TCSANOW 0
#define TCSADRAIN 1
#define TCSAFLUSH 2
#endif

View File

@@ -0,0 +1,114 @@
/*
* include/asm-s390/termios.h
*
* S390 version
*
* Derived from "include/asm-i386/termios.h"
*/
#ifndef _S390_TERMIOS_H
#define _S390_TERMIOS_H
#include <asm/termbits.h>
#include <asm/ioctls.h>
struct winsize {
unsigned short ws_row;
unsigned short ws_col;
unsigned short ws_xpixel;
unsigned short ws_ypixel;
};
#define NCC 8
struct termio {
unsigned short c_iflag; /* input mode flags */
unsigned short c_oflag; /* output mode flags */
unsigned short c_cflag; /* control mode flags */
unsigned short c_lflag; /* local mode flags */
unsigned char c_line; /* line discipline */
unsigned char c_cc[NCC]; /* control characters */
};
/* modem lines */
#define TIOCM_LE 0x001
#define TIOCM_DTR 0x002
#define TIOCM_RTS 0x004
#define TIOCM_ST 0x008
#define TIOCM_SR 0x010
#define TIOCM_CTS 0x020
#define TIOCM_CAR 0x040
#define TIOCM_RNG 0x080
#define TIOCM_DSR 0x100
#define TIOCM_CD TIOCM_CAR
#define TIOCM_RI TIOCM_RNG
#define TIOCM_OUT1 0x2000
#define TIOCM_OUT2 0x4000
#define TIOCM_LOOP 0x8000
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
/* line disciplines */
#define N_TTY 0
#define N_SLIP 1
#define N_MOUSE 2
#define N_PPP 3
#define N_STRIP 4
#define N_AX25 5
#define N_X25 6 /* X.25 async */
#define N_6PACK 7
#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */
#define N_R3964 9 /* Reserved for Simatic R3964 module */
#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */
#define N_IRDA 11 /* Linux IR - http://irda.sourceforge.net/ */
#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */
#define N_HDLC 13 /* synchronous HDLC */
#define N_SYNC_PPP 14 /* synchronous PPP */
#define N_HCI 15 /* Bluetooth HCI UART */
#ifdef __KERNEL__
/* intr=^C quit=^\ erase=del kill=^U
eof=^D vtime=\0 vmin=\1 sxtc=\0
start=^Q stop=^S susp=^Z eol=\0
reprint=^R discard=^U werase=^W lnext=^V
eol2=\0
*/
#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
/*
* Translate a "termio" structure into a "termios". Ugh.
*/
#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
unsigned short __tmp; \
get_user(__tmp,&(termio)->x); \
(termios)->x = (0xffff0000 & ((termios)->x)) | __tmp; \
}
#define user_termio_to_kernel_termios(termios, termio) \
({ \
SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
})
/*
* Translate a "termios" structure into a "termio". Ugh.
*/
#define kernel_termios_to_user_termio(termio, termios) \
({ \
put_user((termios)->c_iflag, &(termio)->c_iflag); \
put_user((termios)->c_oflag, &(termio)->c_oflag); \
put_user((termios)->c_cflag, &(termio)->c_cflag); \
put_user((termios)->c_lflag, &(termio)->c_lflag); \
put_user((termios)->c_line, &(termio)->c_line); \
copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
})
#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
#endif /* __KERNEL__ */
#endif /* _S390_TERMIOS_H */

View File

@@ -0,0 +1,119 @@
/*
* include/asm-s390/thread_info.h
*
* S390 version
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#ifndef _ASM_THREAD_INFO_H
#define _ASM_THREAD_INFO_H
#ifdef __KERNEL__
/*
* Size of kernel stack for each process
*/
#ifndef __s390x__
#ifndef __SMALL_STACK
#define THREAD_ORDER 1
#define ASYNC_ORDER 1
#else
#define THREAD_ORDER 0
#define ASYNC_ORDER 0
#endif
#else /* __s390x__ */
#ifndef __SMALL_STACK
#define THREAD_ORDER 2
#define ASYNC_ORDER 2
#else
#define THREAD_ORDER 1
#define ASYNC_ORDER 1
#endif
#endif /* __s390x__ */
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/lowcore.h>
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
* - this struct shares the supervisor stack pages
* - if the contents of this structure are changed, the assembly constants must also be changed
*/
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned int cpu; /* current CPU */
unsigned int preempt_count; /* 0 => preemptable */
struct restart_block restart_block;
};
/*
* macros/functions for gaining access to the thread information structure
*/
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/* how to get the thread information struct from C */
/*
 * current_thread_info - thread_info of the running task.  The kernel
 * stack address is read from the lowcore (__LC_KERNEL_STACK); the
 * thread_info structure sits at the low end of the THREAD_SIZE
 * stack area, i.e. stack top minus THREAD_SIZE.
 */
static inline struct thread_info *current_thread_info(void)
{
return (struct thread_info *)((*(unsigned long *) __LC_KERNEL_STACK)-THREAD_SIZE);
}
/* thread information allocation */
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL,THREAD_ORDER))
#define free_thread_info(ti) free_pages((unsigned long) (ti),THREAD_ORDER)
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
#endif
/*
* thread information flags bit numbers
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_RESTART_SVC 4 /* restart svc with new svc number */
#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
#define TIF_SINGLE_STEP 6 /* deliver sigtrap on return to user */
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
#define TIF_31BIT 18 /* 32bit process */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_RESTART_SVC (1<<TIF_RESTART_SVC)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_31BIT (1<<TIF_31BIT)
#endif /* __KERNEL__ */
#define PREEMPT_ACTIVE 0x4000000
#endif /* _ASM_THREAD_INFO_H */

View File

@@ -0,0 +1,48 @@
/*
* include/asm-s390/timer.h
*
* (C) Copyright IBM Corp. 2003
* Virtual CPU timer
*
* Author: Jan Glauber (jang@de.ibm.com)
*/
#ifndef _ASM_S390_TIMER_H
#define _ASM_S390_TIMER_H
#include <linux/timer.h>
#define VTIMER_MAX_SLICE (0x7ffffffffffff000LL)
/* A virtual CPU timer, queued on a per-cpu list (see vtimer_queue below). */
struct vtimer_list {
struct list_head entry; /* link in the cpu's timer list */
int cpu; /* cpu this timer is queued on */
__u64 expires; /* expiry value, bounded by VTIMER_MAX_SLICE */
__u64 interval; /* NOTE(review): presumably the re-arm period used by
add_virt_timer_periodic() -- confirm in the implementation */
spinlock_t lock;
unsigned long magic;
void (*function)(unsigned long, struct pt_regs*); /* expiry callback */
unsigned long data; /* argument passed to function */
};
/* the offset value will wrap after ca. 71 years */
struct vtimer_queue {
struct list_head list;
spinlock_t lock;
__u64 to_expire; /* current event expire time */
__u64 offset; /* list offset to zero */
__u64 idle; /* temp var for idle */
};
void set_vtimer(__u64 expires);
extern void init_virt_timer(struct vtimer_list *timer);
extern void add_virt_timer(void *new);
extern void add_virt_timer_periodic(void *new);
extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires);
extern int del_virt_timer(struct vtimer_list *timer);
#endif

View File

@@ -0,0 +1,36 @@
/*
* include/asm-s390/timex.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
*
* Derived from "include/asm-i386/timex.h"
* Copyright (C) 1992, Linus Torvalds
*/
#ifndef _ASM_S390_TIMEX_H
#define _ASM_S390_TIMEX_H
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
typedef unsigned long long cycles_t;
extern cycles_t cacheflush_time;
/*
 * get_cycles - cycle counter for timing purposes: the 64-bit TOD
 * clock read with STCK, shifted right by two bits.
 */
static inline cycles_t get_cycles(void)
{
cycles_t cycles;
__asm__("stck 0(%1)" : "=m" (cycles) : "a" (&cycles) : "cc");
return cycles >> 2;
}
/* get_clock - read the raw 64-bit TOD clock via STCK. */
static inline unsigned long long get_clock (void)
{
unsigned long long clk;
__asm__("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc");
return clk;
}
#endif

View File

@@ -0,0 +1,20 @@
#ifndef _S390_TLB_H
#define _S390_TLB_H
/*
* s390 doesn't need any special per-pte or
* per-vma handling..
*/
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
/*
* .. because we flush the whole mm when it
* fills up.
*/
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h>
#endif

View File

@@ -0,0 +1,153 @@
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H
#include <linux/config.h>
#include <linux/mm.h>
#include <asm/processor.h>
/*
* TLB flushing:
*
* - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
* - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*/
/*
* S/390 has three ways of flushing TLBs
* 'ptlb' does a flush of the local processor
* 'csp' flushes the TLBs on all PUs of a SMP
* 'ipte' invalidates a pte in a page table and flushes that out of
* the TLBs of all PUs of a SMP
*/
#define local_flush_tlb() \
do { __asm__ __volatile__("ptlb": : :"memory"); } while (0)
#ifndef CONFIG_SMP
/*
* We always need to flush, since s390 does not flush tlb
* on each context switch
*/
static inline void flush_tlb(void)
{
local_flush_tlb();
}
static inline void flush_tlb_all(void)
{
local_flush_tlb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
local_flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
local_flush_tlb();
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
local_flush_tlb();
}
/*
 * UP: flushing a kernel range degenerates to a full local flush.
 * Bug fix: no trailing semicolon inside the macro -- the caller
 * supplies it, and a semicolon in the expansion breaks constructs
 * like "if (x) flush_tlb_kernel_range(s, e); else ...".
 */
#define flush_tlb_kernel_range(start, end) \
local_flush_tlb()
#else
#include <asm/smp.h>
extern void smp_ptlb_all(void);
/*
 * global_flush_tlb - flush the TLBs of all cpus.  Uses CSP (compare
 * and swap and purge): with the low-order bit of the operand address
 * set, a successful compare also purges the TLBs on all cpus.  The
 * even/odd register pair 2/3 is zeroed and compared against the
 * zero-initialized 'dummy' so the compare always succeeds.  31-bit
 * machines without the CSP facility fall back to broadcasting PTLB
 * to every cpu via smp_ptlb_all().
 */
static inline void global_flush_tlb(void)
{
#ifndef __s390x__
if (!MACHINE_HAS_CSP) {
smp_ptlb_all();
return;
}
#endif /* __s390x__ */
{
register unsigned long addr asm("4");
long dummy;
dummy = 0;
addr = ((unsigned long) &dummy) + 1; /* low bit set requests the purge */
__asm__ __volatile__ (
" slr 2,2\n" /* zero the r2/r3 compare pair */
" slr 3,3\n"
" csp 2,%0" /* compare succeeds -> purge all TLBs */
: : "a" (addr), "m" (dummy) : "cc", "2", "3" );
}
}
/*
* We only have to do global flush of tlb if process run since last
* flush on any other pu than current.
* If we have threads (mm->count > 1) we always do a global flush,
* since the process runs on more than one processor at the same time.
*/
/*
 * __flush_tlb_mm - flush the TLB entries belonging to one mm.
 * Nothing to do if the mm never ran on any cpu.  Machines with the
 * IDTE facility can invalidate by address-space (page table origin)
 * directly; the raw ".insn rrf,0xb98e0000" encodes IDTE, and 2048 is
 * its option operand (NOTE(review): see IDTE in the Principles of
 * Operation for the exact meaning -- confirm).  Otherwise flush
 * locally when only this cpu ever used the mm, globally when other
 * cpus may hold entries.
 */
static inline void __flush_tlb_mm(struct mm_struct * mm)
{
cpumask_t local_cpumask;
if (unlikely(cpus_empty(mm->cpu_vm_mask)))
return;
if (MACHINE_HAS_IDTE) {
asm volatile (".insn rrf,0xb98e0000,0,%0,%1,0"
: : "a" (2048),
"a" (__pa(mm->pgd)&PAGE_MASK) : "cc" );
return;
}
preempt_disable(); /* cpu id must stay stable for the mask compare */
local_cpumask = cpumask_of_cpu(smp_processor_id());
if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
local_flush_tlb();
else
global_flush_tlb();
preempt_enable();
}
/* SMP: flush the TLB entries of the current mm on all cpus that used it. */
static inline void flush_tlb(void)
{
__flush_tlb_mm(current->mm);
}
/* SMP: flush every TLB entry on every cpu. */
static inline void flush_tlb_all(void)
{
global_flush_tlb();
}
/* SMP: flush the TLB entries of one mm. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
__flush_tlb_mm(mm);
}
/* SMP: no single-page flush in this path -- flush the whole mm. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
__flush_tlb_mm(vma->vm_mm);
}
/* SMP: no range flush in this path -- flush the whole mm. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
__flush_tlb_mm(vma->vm_mm);
}
#define flush_tlb_kernel_range(start, end) global_flush_tlb()
#endif
static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
/* S/390 does not keep any page table caches in TLB */
}
#endif /* _S390_TLBFLUSH_H */

View File

@@ -0,0 +1,23 @@
/*
* File...........: linux/include/asm/todclk.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
*
* History of changes (starts July 2000)
*/
#ifndef __ASM_TODCLK_H
#define __ASM_TODCLK_H
#ifdef __KERNEL__
#define TOD_uSEC (0x1000ULL)
#define TOD_mSEC (1000 * TOD_uSEC)
#define TOD_SEC (1000 * TOD_mSEC)
#define TOD_MIN (60 * TOD_SEC)
#define TOD_HOUR (60 * TOD_MIN)
#endif
#endif

View File

@@ -0,0 +1,6 @@
#ifndef _ASM_S390_TOPOLOGY_H
#define _ASM_S390_TOPOLOGY_H
#include <asm-generic/topology.h>
#endif /* _ASM_S390_TOPOLOGY_H */

View File

@@ -0,0 +1,101 @@
/*
* include/asm-s390/types.h
*
* S390 version
*
* Derived from "include/asm-i386/types.h"
*/
#ifndef _S390_TYPES_H
#define _S390_TYPES_H
#ifndef __ASSEMBLY__
typedef unsigned short umode_t;
/*
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
* header files exported to user space
*/
typedef __signed__ char __s8;
typedef unsigned char __u8;
typedef __signed__ short __s16;
typedef unsigned short __u16;
typedef __signed__ int __s32;
typedef unsigned int __u32;
#ifndef __s390x__
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
typedef __signed__ long long __s64;
typedef unsigned long long __u64;
#endif
#else /* __s390x__ */
typedef __signed__ long __s64;
typedef unsigned long __u64;
#endif
/* A address type so that arithmetic can be done on it & it can be upgraded to
64 bit when necessary
*/
typedef unsigned long addr_t;
typedef __signed__ long saddr_t;
#endif /* __ASSEMBLY__ */
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
#ifdef __KERNEL__
#ifndef __s390x__
#define BITS_PER_LONG 32
#else
#define BITS_PER_LONG 64
#endif
#ifndef __ASSEMBLY__
#include <linux/config.h>
typedef signed char s8;
typedef unsigned char u8;
typedef signed short s16;
typedef unsigned short u16;
typedef signed int s32;
typedef unsigned int u32;
#ifndef __s390x__
typedef signed long long s64;
typedef unsigned long long u64;
#else /* __s390x__ */
typedef signed long s64;
typedef unsigned long u64;
#endif /* __s390x__ */
typedef u32 dma_addr_t;
typedef unsigned int kmem_bufctl_t;
#ifndef __s390x__
typedef union {
unsigned long long pair;
struct {
unsigned long even;
unsigned long odd;
} subreg;
} register_pair;
#ifdef CONFIG_LBD
typedef u64 sector_t;
#define HAVE_SECTOR_T
#endif
#endif /* ! __s390x__ */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _S390_TYPES_H */

View File

@@ -0,0 +1,436 @@
/*
* include/asm-s390/uaccess.h
*
* S390 version
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Hartmut Penner (hp@de.ibm.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/uaccess.h"
*/
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H
/*
* User space memory access functions
*/
#include <linux/sched.h>
#include <linux/errno.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/*
* The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with
* get_fs() == KERNEL_DS, checking is bypassed.
*
* For historical reasons, these macros are grossly misnamed.
*/
#define MAKE_MM_SEG(a) ((mm_segment_t) { (a) })
#define KERNEL_DS MAKE_MM_SEG(0)
#define USER_DS MAKE_MM_SEG(1)
#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.mm_segment)
#ifdef __s390x__
/*
 * set_fs(x): record the new mm_segment in the thread structure, then
 * reload control register 7 with the matching address-space control
 * element — user_asce when mm_segment.ar4 is set (USER_DS), otherwise
 * kernel_asce (KERNEL_DS).  This selects which address space the
 * mvcp/mvcs-based uaccess primitives below operate on.
 * 64-bit variant: loads CR7 with lctlg.
 */
#define set_fs(x) \
({ \
	unsigned long __pto; \
	current->thread.mm_segment = (x); \
	__pto = current->thread.mm_segment.ar4 ? \
		S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
	asm volatile ("lctlg 7,7,%0" : : "m" (__pto) ); \
})
#else
/* 31-bit variant: identical logic, but CR7 is loaded with lctl. */
#define set_fs(x) \
({ \
	unsigned long __pto; \
	current->thread.mm_segment = (x); \
	__pto = current->thread.mm_segment.ar4 ? \
		S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
	asm volatile ("lctl 7,7,%0" : : "m" (__pto) ); \
})
#endif
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
#define __access_ok(addr,size) (1)
#define access_ok(type,addr,size) __access_ok(addr,size)
/*
 * Legacy range-check interface: returns 0 when the user-space range
 * passes access_ok(), -EFAULT otherwise.  (On s390 access_ok() always
 * succeeds, so this can only return 0.)
 */
extern inline int verify_area(int type, const void __user *addr,
			      unsigned long size)
{
	if (access_ok(type, addr, size))
		return 0;
	return -EFAULT;
}
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
/* One table entry: address of an instruction allowed to fault, and the
 * address of the out-of-line fixup code to continue at (see the
 * __uaccess_fixup asm below, which emits these pairs). */
struct exception_table_entry
{
	unsigned long insn, fixup;
};
#ifndef __s390x__
#define __uaccess_fixup \
".section .fixup,\"ax\"\n" \
"2: lhi %0,%4\n" \
" bras 1,3f\n" \
" .long 1b\n" \
"3: l 1,0(1)\n" \
" br 1\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,2b\n" \
".previous"
#define __uaccess_clobber "cc", "1"
#else /* __s390x__ */
#define __uaccess_fixup \
".section .fixup,\"ax\"\n" \
"2: lghi %0,%4\n" \
" jg 1b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 8\n" \
" .quad 0b,2b\n" \
".previous"
#define __uaccess_clobber "cc"
#endif /* __s390x__ */
/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
*/
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __put_user_asm(x, ptr, err) \
({ \
err = 0; \
asm volatile( \
"0: mvcs 0(%1,%2),%3,%0\n" \
"1:\n" \
__uaccess_fixup \
: "+&d" (err) \
: "d" (sizeof(*(ptr))), "a" (ptr), "Q" (x), \
"K" (-EFAULT) \
: __uaccess_clobber ); \
})
#else
#define __put_user_asm(x, ptr, err) \
({ \
err = 0; \
asm volatile( \
"0: mvcs 0(%1,%2),0(%3),%0\n" \
"1:\n" \
__uaccess_fixup \
: "+&d" (err) \
: "d" (sizeof(*(ptr))), "a" (ptr), "a" (&(x)), \
"K" (-EFAULT), "m" (x) \
: __uaccess_clobber ); \
})
#endif
#ifndef __CHECKER__
#define __put_user(x, ptr) \
({ \
__typeof__(*(ptr)) __x = (x); \
int __pu_err; \
switch (sizeof (*(ptr))) { \
case 1: \
case 2: \
case 4: \
case 8: \
__put_user_asm(__x, ptr, __pu_err); \
break; \
default: \
__pu_err = __put_user_bad(); \
break; \
} \
__pu_err; \
})
#else
#define __put_user(x, ptr) \
({ \
void __user *p; \
p = (ptr); \
0; \
})
#endif
#define put_user(x, ptr) \
({ \
might_sleep(); \
__put_user(x, ptr); \
})
extern int __put_user_bad(void);
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __get_user_asm(x, ptr, err) \
({ \
err = 0; \
asm volatile ( \
"0: mvcp %O1(%2,%R1),0(%3),%0\n" \
"1:\n" \
__uaccess_fixup \
: "+&d" (err), "=Q" (x) \
: "d" (sizeof(*(ptr))), "a" (ptr), \
"K" (-EFAULT) \
: __uaccess_clobber ); \
})
#else
#define __get_user_asm(x, ptr, err) \
({ \
err = 0; \
asm volatile ( \
"0: mvcp 0(%2,%5),0(%3),%0\n" \
"1:\n" \
__uaccess_fixup \
: "+&d" (err), "=m" (x) \
: "d" (sizeof(*(ptr))), "a" (ptr), \
"K" (-EFAULT), "a" (&(x)) \
: __uaccess_clobber ); \
})
#endif
#ifndef __CHECKER__
#define __get_user(x, ptr) \
({ \
__typeof__(*(ptr)) __x; \
int __gu_err; \
switch (sizeof(*(ptr))) { \
case 1: \
case 2: \
case 4: \
case 8: \
__get_user_asm(__x, ptr, __gu_err); \
break; \
default: \
__x = 0; \
__gu_err = __get_user_bad(); \
break; \
} \
(x) = __x; \
__gu_err; \
})
#else
#define __get_user(x, ptr) \
({ \
void __user *p; \
p = (ptr); \
0; \
})
#endif
#define get_user(x, ptr) \
({ \
might_sleep(); \
__get_user(x, ptr); \
})
extern int __get_user_bad(void);
#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user
extern long __copy_to_user_asm(const void *from, long n, void __user *to);
/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Unchecked variant of copy_to_user(): the caller must have validated
 * the destination range with access_ok() first.
 *
 * Returns the number of bytes that could not be copied; zero on
 * complete success.
 */
static inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long uncopied;

	/* Note: the asm helper takes its operands as (from, n, to). */
	uncopied = __copy_to_user_asm(from, n, to);
	return uncopied;
}
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
/**
 * copy_to_user: - Copy a block of data into user space.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Checked copy from kernel space to user space.
 *
 * Returns the number of bytes that could not be copied; zero on
 * complete success.  When the access check fails, nothing is copied
 * and the full count is returned.
 */
static inline unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_sleep();
	if (!access_ok(VERIFY_WRITE, to, n))
		return n;
	return __copy_to_user(to, from, n);
}
extern long __copy_from_user_asm(void *to, long n, const void __user *from);
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Unchecked variant of copy_from_user(): the caller must have
 * validated the source range with access_ok() first.
 *
 * Returns the number of bytes that could not be copied; zero on
 * complete success.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long uncopied;

	/* Note: the asm helper takes its operands as (to, n, from). */
	uncopied = __copy_from_user_asm(to, n, from);
	return uncopied;
}
/**
 * copy_from_user: - Copy a block of data from user space.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Checked copy from user space to kernel space.
 *
 * Returns the number of bytes that could not be copied; zero on
 * complete success.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_sleep();
	if (!access_ok(VERIFY_READ, from, n)) {
		/* Failed check: zero the whole destination, report all
		 * n bytes as uncopied. */
		memset(to, 0, n);
		return n;
	}
	return __copy_from_user(to, from, n);
}
extern unsigned long __copy_in_user_asm(const void __user *from, long n,
void __user *to);
/* Unchecked user-to-user copy.  The asm helper takes its operands as
 * (from, n, to); the result follows the copy_* convention of returning
 * the number of bytes left uncopied. */
static inline unsigned long
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned long left;

	left = __copy_in_user_asm(from, n, to);
	return left;
}
/**
 * copy_in_user: - Copy a block of data from one user-space area to another.
 * @to: Destination address, in user space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Returns the number of bytes that could not be copied; zero on
 * complete success.
 */
static inline unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_sleep();
	/* Use access_ok() and the __copy_in_user() wrapper for
	 * consistency with copy_to_user()/copy_from_user().  This is
	 * behavior-identical: __access_ok() is constant 1 on s390 and
	 * __copy_in_user() just forwards to __copy_in_user_asm(). */
	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
		n = __copy_in_user(to, from, n);
	return n;
}
/*
* Copy a null terminated string from userspace.
*/
extern long __strncpy_from_user_asm(long count, char *dst,
const char __user *src);
/*
 * Copy a NUL-terminated string from user space into @dst, copying at
 * most @count bytes.  Only the first byte of @src is range-checked
 * (matching the helper's incremental access model); returns -EFAULT
 * when that check fails, otherwise the asm helper's result.
 */
static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	might_sleep();
	if (!access_ok(VERIFY_READ, src, 1))
		return -EFAULT;
	return __strncpy_from_user_asm(count, dst, src);
}
extern long __strnlen_user_asm(long count, const char __user *src);
/*
 * Bounded strlen on a user-space string: scans at most @n bytes of
 * @src via the asm helper.  May sleep; no access_ok() check is
 * performed here (faults are handled through the exception table).
 */
static inline unsigned long
strnlen_user(const char __user *src, unsigned long n)
{
	unsigned long len;

	might_sleep();
	len = __strnlen_user_asm(n, src);
	return len;
}
/**
* strlen_user: - Get the size of a string in user space.
* @str: The string to measure.
*
* Context: User context only. This function may sleep.
*
* Get the size of a NUL-terminated string in user space.
*
* Returns the size of the string INCLUDING the terminating NUL.
* On exception, returns 0.
*
* If there is a limit on the length of a valid string, you may wish to
* consider using strnlen_user() instead.
*/
#define strlen_user(str) strnlen_user(str, ~0UL)
/*
* Zero Userspace
*/
extern long __clear_user_asm(void __user *to, long n);
/* Unchecked variant of clear_user(): zero @n bytes at user address
 * @to.  Returns the number of bytes that could not be cleared. */
static inline unsigned long
__clear_user(void __user *to, unsigned long n)
{
	unsigned long uncleared;

	uncleared = __clear_user_asm(to, n);
	return uncleared;
}
/**
 * clear_user: - Zero a block of memory in user space.
 * @to: Destination address, in user space.
 * @n: Number of bytes to zero.
 *
 * Context: User context only.  This function may sleep.
 *
 * Returns the number of bytes that could not be cleared; zero on
 * complete success.
 */
static inline unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_sleep();
	/* Go through the __clear_user() wrapper rather than calling
	 * __clear_user_asm() directly, matching the structure of
	 * copy_to_user()/copy_from_user().  Behavior-identical. */
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}
#endif /* __S390_UACCESS_H */

View File

@@ -0,0 +1,20 @@
/*
* include/asm-s390/ucontext.h
*
* S390 version
*
* Derived from "include/asm-i386/ucontext.h"
*/
#ifndef _ASM_S390_UCONTEXT_H
#define _ASM_S390_UCONTEXT_H
/* Signal-handler user context (layout derived from the i386 version). */
struct ucontext {
	unsigned long uc_flags;		/* flags word; no flags defined in this header */
	struct ucontext *uc_link;	/* pointer to a chained context, if any */
	stack_t uc_stack;		/* signal stack description */
	_sigregs uc_mcontext;		/* saved machine state, s390 _sigregs format */
	sigset_t uc_sigmask;		/* mask last for extensibility */
};
#endif /* !_ASM_S390_UCONTEXT_H */

View File

@@ -0,0 +1,24 @@
/*
* include/asm-s390/unaligned.h
*
* S390 version
*
* Derived from "include/asm-i386/unaligned.h"
*/
#ifndef __S390_UNALIGNED_H
#define __S390_UNALIGNED_H
/*
* The S390 can do unaligned accesses itself.
*
* The strange macros are there to make sure these can't
* be misused in a way that makes them not work on other
* architectures where unaligned accesses aren't as simple.
*/
#define get_unaligned(ptr) (*(ptr))
#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
#endif

View File

@@ -0,0 +1,601 @@
/*
* include/asm-s390/unistd.h
*
* S390 version
*
* Derived from "include/asm-i386/unistd.h"
*/
#ifndef _ASM_S390_UNISTD_H_
#define _ASM_S390_UNISTD_H_
/*
* This file contains the system call numbers.
*/
#define __NR_exit 1
#define __NR_fork 2
#define __NR_read 3
#define __NR_write 4
#define __NR_open 5
#define __NR_close 6
#define __NR_restart_syscall 7
#define __NR_creat 8
#define __NR_link 9
#define __NR_unlink 10
#define __NR_execve 11
#define __NR_chdir 12
#define __NR_time 13
#define __NR_mknod 14
#define __NR_chmod 15
#define __NR_lchown 16
#define __NR_lseek 19
#define __NR_getpid 20
#define __NR_mount 21
#define __NR_umount 22
#define __NR_setuid 23
#define __NR_getuid 24
#define __NR_stime 25
#define __NR_ptrace 26
#define __NR_alarm 27
#define __NR_pause 29
#define __NR_utime 30
#define __NR_access 33
#define __NR_nice 34
#define __NR_sync 36
#define __NR_kill 37
#define __NR_rename 38
#define __NR_mkdir 39
#define __NR_rmdir 40
#define __NR_dup 41
#define __NR_pipe 42
#define __NR_times 43
#define __NR_brk 45
#define __NR_setgid 46
#define __NR_getgid 47
#define __NR_signal 48
#define __NR_geteuid 49
#define __NR_getegid 50
#define __NR_acct 51
#define __NR_umount2 52
#define __NR_ioctl 54
#define __NR_fcntl 55
#define __NR_setpgid 57
#define __NR_umask 60
#define __NR_chroot 61
#define __NR_ustat 62
#define __NR_dup2 63
#define __NR_getppid 64
#define __NR_getpgrp 65
#define __NR_setsid 66
#define __NR_sigaction 67
#define __NR_setreuid 70
#define __NR_setregid 71
#define __NR_sigsuspend 72
#define __NR_sigpending 73
#define __NR_sethostname 74
#define __NR_setrlimit 75
#define __NR_getrlimit 76
#define __NR_getrusage 77
#define __NR_gettimeofday 78
#define __NR_settimeofday 79
#define __NR_getgroups 80
#define __NR_setgroups 81
#define __NR_symlink 83
#define __NR_readlink 85
#define __NR_uselib 86
#define __NR_swapon 87
#define __NR_reboot 88
#define __NR_readdir 89
#define __NR_mmap 90
#define __NR_munmap 91
#define __NR_truncate 92
#define __NR_ftruncate 93
#define __NR_fchmod 94
#define __NR_fchown 95
#define __NR_getpriority 96
#define __NR_setpriority 97
#define __NR_statfs 99
#define __NR_fstatfs 100
#define __NR_ioperm 101
#define __NR_socketcall 102
#define __NR_syslog 103
#define __NR_setitimer 104
#define __NR_getitimer 105
#define __NR_stat 106
#define __NR_lstat 107
#define __NR_fstat 108
#define __NR_lookup_dcookie 110
#define __NR_vhangup 111
#define __NR_idle 112
#define __NR_wait4 114
#define __NR_swapoff 115
#define __NR_sysinfo 116
#define __NR_ipc 117
#define __NR_fsync 118
#define __NR_sigreturn 119
#define __NR_clone 120
#define __NR_setdomainname 121
#define __NR_uname 122
#define __NR_adjtimex 124
#define __NR_mprotect 125
#define __NR_sigprocmask 126
#define __NR_create_module 127
#define __NR_init_module 128
#define __NR_delete_module 129
#define __NR_get_kernel_syms 130
#define __NR_quotactl 131
#define __NR_getpgid 132
#define __NR_fchdir 133
#define __NR_bdflush 134
#define __NR_sysfs 135
#define __NR_personality 136
#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
#define __NR_setfsuid 138
#define __NR_setfsgid 139
#define __NR__llseek 140
#define __NR_getdents 141
#define __NR__newselect 142
#define __NR_flock 143
#define __NR_msync 144
#define __NR_readv 145
#define __NR_writev 146
#define __NR_getsid 147
#define __NR_fdatasync 148
#define __NR__sysctl 149
#define __NR_mlock 150
#define __NR_munlock 151
#define __NR_mlockall 152
#define __NR_munlockall 153
#define __NR_sched_setparam 154
#define __NR_sched_getparam 155
#define __NR_sched_setscheduler 156
#define __NR_sched_getscheduler 157
#define __NR_sched_yield 158
#define __NR_sched_get_priority_max 159
#define __NR_sched_get_priority_min 160
#define __NR_sched_rr_get_interval 161
#define __NR_nanosleep 162
#define __NR_mremap 163
#define __NR_setresuid 164
#define __NR_getresuid 165
#define __NR_query_module 167
#define __NR_poll 168
#define __NR_nfsservctl 169
#define __NR_setresgid 170
#define __NR_getresgid 171
#define __NR_prctl 172
#define __NR_rt_sigreturn 173
#define __NR_rt_sigaction 174
#define __NR_rt_sigprocmask 175
#define __NR_rt_sigpending 176
#define __NR_rt_sigtimedwait 177
#define __NR_rt_sigqueueinfo 178
#define __NR_rt_sigsuspend 179
#define __NR_pread64 180
#define __NR_pwrite64 181
#define __NR_chown 182
#define __NR_getcwd 183
#define __NR_capget 184
#define __NR_capset 185
#define __NR_sigaltstack 186
#define __NR_sendfile 187
#define __NR_getpmsg 188
#define __NR_putpmsg 189
#define __NR_vfork 190
#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
#define __NR_mmap2 192
#define __NR_truncate64 193
#define __NR_ftruncate64 194
#define __NR_stat64 195
#define __NR_lstat64 196
#define __NR_fstat64 197
#define __NR_lchown32 198
#define __NR_getuid32 199
#define __NR_getgid32 200
#define __NR_geteuid32 201
#define __NR_getegid32 202
#define __NR_setreuid32 203
#define __NR_setregid32 204
#define __NR_getgroups32 205
#define __NR_setgroups32 206
#define __NR_fchown32 207
#define __NR_setresuid32 208
#define __NR_getresuid32 209
#define __NR_setresgid32 210
#define __NR_getresgid32 211
#define __NR_chown32 212
#define __NR_setuid32 213
#define __NR_setgid32 214
#define __NR_setfsuid32 215
#define __NR_setfsgid32 216
#define __NR_pivot_root 217
#define __NR_mincore 218
#define __NR_madvise 219
#define __NR_getdents64 220
#define __NR_fcntl64 221
#define __NR_readahead 222
#define __NR_sendfile64 223
#define __NR_setxattr 224
#define __NR_lsetxattr 225
#define __NR_fsetxattr 226
#define __NR_getxattr 227
#define __NR_lgetxattr 228
#define __NR_fgetxattr 229
#define __NR_listxattr 230
#define __NR_llistxattr 231
#define __NR_flistxattr 232
#define __NR_removexattr 233
#define __NR_lremovexattr 234
#define __NR_fremovexattr 235
#define __NR_gettid 236
#define __NR_tkill 237
#define __NR_futex 238
#define __NR_sched_setaffinity 239
#define __NR_sched_getaffinity 240
#define __NR_tgkill 241
/* Number 242 is reserved for tux */
#define __NR_io_setup 243
#define __NR_io_destroy 244
#define __NR_io_getevents 245
#define __NR_io_submit 246
#define __NR_io_cancel 247
#define __NR_exit_group 248
#define __NR_epoll_create 249
#define __NR_epoll_ctl 250
#define __NR_epoll_wait 251
#define __NR_set_tid_address 252
#define __NR_fadvise64 253
#define __NR_timer_create 254
#define __NR_timer_settime (__NR_timer_create+1)
#define __NR_timer_gettime (__NR_timer_create+2)
#define __NR_timer_getoverrun (__NR_timer_create+3)
#define __NR_timer_delete (__NR_timer_create+4)
#define __NR_clock_settime (__NR_timer_create+5)
#define __NR_clock_gettime (__NR_timer_create+6)
#define __NR_clock_getres (__NR_timer_create+7)
#define __NR_clock_nanosleep (__NR_timer_create+8)
/* Number 263 is reserved for vserver */
#define __NR_fadvise64_64 264
#define __NR_statfs64 265
#define __NR_fstatfs64 266
/* Number 267 is reserved for new sys_remap_file_pages */
/* Number 268 is reserved for new sys_mbind */
/* Number 269 is reserved for new sys_get_mempolicy */
/* Number 270 is reserved for new sys_set_mempolicy */
#define __NR_mq_open 271
#define __NR_mq_unlink 272
#define __NR_mq_timedsend 273
#define __NR_mq_timedreceive 274
#define __NR_mq_notify 275
#define __NR_mq_getsetattr 276
/* Number 277 is reserved for new sys_kexec_load */
#define NR_syscalls 278
/*
* There are some system calls that are not present on 64 bit, some
* have a different name although they do the same (e.g. __NR_chown32
* is __NR_chown on 64 bit).
*/
#ifdef __s390x__
#undef __NR_time
#undef __NR_lchown
#undef __NR_setuid
#undef __NR_getuid
#undef __NR_stime
#undef __NR_setgid
#undef __NR_getgid
#undef __NR_geteuid
#undef __NR_getegid
#undef __NR_setreuid
#undef __NR_setregid
#undef __NR_getrlimit
#undef __NR_getgroups
#undef __NR_setgroups
#undef __NR_fchown
#undef __NR_ioperm
#undef __NR_setfsuid
#undef __NR_setfsgid
#undef __NR__llseek
#undef __NR__newselect
#undef __NR_setresuid
#undef __NR_getresuid
#undef __NR_setresgid
#undef __NR_getresgid
#undef __NR_chown
#undef __NR_ugetrlimit
#undef __NR_mmap2
#undef __NR_truncate64
#undef __NR_ftruncate64
#undef __NR_stat64
#undef __NR_lstat64
#undef __NR_fstat64
#undef __NR_lchown32
#undef __NR_getuid32
#undef __NR_getgid32
#undef __NR_geteuid32
#undef __NR_getegid32
#undef __NR_setreuid32
#undef __NR_setregid32
#undef __NR_getgroups32
#undef __NR_setgroups32
#undef __NR_fchown32
#undef __NR_setresuid32
#undef __NR_getresuid32
#undef __NR_setresgid32
#undef __NR_getresgid32
#undef __NR_chown32
#undef __NR_setuid32
#undef __NR_setgid32
#undef __NR_setfsuid32
#undef __NR_setfsgid32
#undef __NR_getdents64
#undef __NR_fcntl64
#undef __NR_sendfile64
#undef __NR_fadvise64_64
#define __NR_select 142
#define __NR_getrlimit 191 /* SuS compliant getrlimit */
#define __NR_lchown 198
#define __NR_getuid 199
#define __NR_getgid 200
#define __NR_geteuid 201
#define __NR_getegid 202
#define __NR_setreuid 203
#define __NR_setregid 204
#define __NR_getgroups 205
#define __NR_setgroups 206
#define __NR_fchown 207
#define __NR_setresuid 208
#define __NR_getresuid 209
#define __NR_setresgid 210
#define __NR_getresgid 211
#define __NR_chown 212
#define __NR_setuid 213
#define __NR_setgid 214
#define __NR_setfsuid 215
#define __NR_setfsgid 216
#endif
/* user-visible error numbers are in the range -1 - -125: see <asm-s390/errno.h> */
/*
 * Convert a raw syscall return value @res for the _syscallN wrappers:
 * values falling in the error range (>= (unsigned long)-125) are
 * negated into errno and the wrapper returns -1; anything else is
 * returned unchanged, cast to the caller's declared result type.
 */
#define __syscall_return(type, res) \
do { \
	if ((unsigned long)(res) >= (unsigned long)(-125)) { \
		errno = -(res); \
		res = -1; \
	} \
	return (type) (res); \
} while (0)
#define _svc_clobber "1", "cc", "memory"
#define _syscall0(type,name) \
type name(void) { \
register long __svcres asm("2"); \
long __res; \
__asm__ __volatile__ ( \
" .if %1 < 256\n" \
" svc %b1\n" \
" .else\n" \
" la %%r1,%1\n" \
" svc 0\n" \
" .endif" \
: "=d" (__svcres) \
: "i" (__NR_##name) \
: _svc_clobber ); \
__res = __svcres; \
__syscall_return(type,__res); \
}
#define _syscall1(type,name,type1,arg1) \
type name(type1 arg1) { \
register type1 __arg1 asm("2") = arg1; \
register long __svcres asm("2"); \
long __res; \
__asm__ __volatile__ ( \
" .if %1 < 256\n" \
" svc %b1\n" \
" .else\n" \
" la %%r1,%1\n" \
" svc 0\n" \
" .endif" \
: "=d" (__svcres) \
: "i" (__NR_##name), \
"0" (__arg1) \
: _svc_clobber ); \
__res = __svcres; \
__syscall_return(type,__res); \
}
#define _syscall2(type,name,type1,arg1,type2,arg2) \
type name(type1 arg1, type2 arg2) { \
register type1 __arg1 asm("2") = arg1; \
register type2 __arg2 asm("3") = arg2; \
register long __svcres asm("2"); \
long __res; \
__asm__ __volatile__ ( \
" .if %1 < 256\n" \
" svc %b1\n" \
" .else\n" \
" la %%r1,%1\n" \
" svc 0\n" \
" .endif" \
: "=d" (__svcres) \
: "i" (__NR_##name), \
"0" (__arg1), \
"d" (__arg2) \
: _svc_clobber ); \
__res = __svcres; \
__syscall_return(type,__res); \
}
#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)\
type name(type1 arg1, type2 arg2, type3 arg3) { \
register type1 __arg1 asm("2") = arg1; \
register type2 __arg2 asm("3") = arg2; \
register type3 __arg3 asm("4") = arg3; \
register long __svcres asm("2"); \
long __res; \
__asm__ __volatile__ ( \
" .if %1 < 256\n" \
" svc %b1\n" \
" .else\n" \
" la %%r1,%1\n" \
" svc 0\n" \
" .endif" \
: "=d" (__svcres) \
: "i" (__NR_##name), \
"0" (__arg1), \
"d" (__arg2), \
"d" (__arg3) \
: _svc_clobber ); \
__res = __svcres; \
__syscall_return(type,__res); \
}
/*
 * Generate a 4-argument syscall wrapper.
 *
 * NOTE(review): the fourth macro parameter is spelled "name4" but the
 * body refers to "arg4", so the wrapper's fourth C parameter is always
 * literally named arg4 and the caller-supplied name is ignored.  This
 * compiles and behaves correctly by accident of the literal token;
 * the same quirk exists in _syscall5 below — TODO confirm before
 * renaming, as callers pass names positionally.
 */
#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,\
		  type4,name4) \
type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
	register type1 __arg1 asm("2") = arg1; \
	register type2 __arg2 asm("3") = arg2; \
	register type3 __arg3 asm("4") = arg3; \
	register type4 __arg4 asm("5") = arg4; \
	register long __svcres asm("2"); \
	long __res; \
	__asm__ __volatile__ ( \
	" .if %1 < 256\n" \
	" svc %b1\n" \
	" .else\n" \
	" la %%r1,%1\n" \
	" svc 0\n" \
	" .endif" \
	: "=d" (__svcres) \
	: "i" (__NR_##name), \
	  "0" (__arg1), \
	  "d" (__arg2), \
	  "d" (__arg3), \
	  "d" (__arg4) \
	: _svc_clobber ); \
	__res = __svcres; \
	__syscall_return(type,__res); \
}
#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,\
type4,name4,type5,name5) \
type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
type5 arg5) { \
register type1 __arg1 asm("2") = arg1; \
register type2 __arg2 asm("3") = arg2; \
register type3 __arg3 asm("4") = arg3; \
register type4 __arg4 asm("5") = arg4; \
register type5 __arg5 asm("6") = arg5; \
register long __svcres asm("2"); \
long __res; \
__asm__ __volatile__ ( \
" .if %1 < 256\n" \
" svc %b1\n" \
" .else\n" \
" la %%r1,%1\n" \
" svc 0\n" \
" .endif" \
: "=d" (__svcres) \
: "i" (__NR_##name), \
"0" (__arg1), \
"d" (__arg2), \
"d" (__arg3), \
"d" (__arg4), \
"d" (__arg5) \
: _svc_clobber ); \
__res = __svcres; \
__syscall_return(type,__res); \
}
#ifdef __KERNEL__
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_SIGNAL
#define __ARCH_WANT_SYS_TIME
#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
# ifndef CONFIG_ARCH_S390X
# define __ARCH_WANT_STAT64
# endif
#endif
#ifdef __KERNEL_SYSCALLS__
#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/stat.h>
#include <linux/syscalls.h>
/*
* we need this inline - forking from kernel space will result
* in NO COPY ON WRITE (!!!), until an execve is executed. This
* is no problem, but for the stack. This is handled by not letting
* main() use the stack at all after fork(). Thus, no function
* calls - which means inline code for fork too, as otherwise we
* would use the stack upon exit from 'fork()'.
*
* Actually only pause and fork are needed inline, so that there
* won't be any messing with the stack from main(), but we define
* some others too.
*/
#define __NR__exit __NR_exit
static inline _syscall0(pid_t,setsid)
static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
static inline _syscall1(int,dup,int,fd)
static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
static inline _syscall1(int,close,int,fd)
static inline _syscall2(long,stat,char *,filename,struct stat *,statbuf)
/* Minimal in-kernel waitpid() for __KERNEL_SYSCALLS__ users: forwards
 * to sys_wait4() with no resource-usage (rusage) buffer. */
static inline pid_t waitpid(int pid, int *wait_stat, int flags)
{
	return sys_wait4(pid, wait_stat, flags, NULL);
}
struct mmap_arg_struct;
asmlinkage long sys_mmap2(struct mmap_arg_struct __user *arg);
asmlinkage long sys_execve(struct pt_regs regs);
asmlinkage long sys_clone(struct pt_regs regs);
asmlinkage long sys_fork(struct pt_regs regs);
asmlinkage long sys_vfork(struct pt_regs regs);
asmlinkage long sys_pipe(unsigned long __user *fildes);
asmlinkage long sys_ptrace(long request, long pid, long addr, long data);
struct sigaction;
asmlinkage long sys_rt_sigaction(int sig,
const struct sigaction __user *act,
struct sigaction __user *oact,
size_t sigsetsize);
#endif
/*
* "Conditional" syscalls
*
* What we want is __attribute__((weak,alias("sys_ni_syscall"))),
* but it doesn't work on all toolchains, so we just do it by hand
*/
#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall");
#endif /* _ASM_S390_UNISTD_H_ */

View File

@@ -0,0 +1,77 @@
/*
* include/asm-s390/user.h
*
* S390 version
*
* Derived from "include/asm-i386/usr.h"
*/
#ifndef _S390_USER_H
#define _S390_USER_H
#include <asm/page.h>
#include <linux/ptrace.h>
/* Core file format: The core file is written in such a way that gdb
can understand it and provide useful information to the user (under
linux we use the 'trad-core' bfd). There are quite a number of
obstacles to being able to view the contents of the floating point
registers, and until these are solved you will not be able to view the
contents of them. Actually, you can read in the core file and look at
the contents of the user struct to find out what the floating point
registers contain.
The actual file contents are as follows:
UPAGE: 1 page consisting of a user struct that tells gdb what is present
in the file. Directly after this is a copy of the task_struct, which
is currently not used by gdb, but it may come in useful at some point.
All of the registers are stored as part of the upage. The upage should
always be only one page.
DATA: The data area is stored. We use current->end_text to
current->brk to pick up all of the user variables, plus any memory
that may have been malloced. No attempt is made to determine if a page
is demand-zero or if a page is totally unused, we just cover the entire
range. All of the addresses are rounded in such a way that an integral
number of pages is written.
STACK: We need the stack information in order to get a meaningful
backtrace. We need to write the data from (esp) to
current->start_stack, so we round each of these off in order to be able
to write an integer number of pages.
The minimum core file size is 3 pages, or 12288 bytes.
*/
/*
* This is the old layout of "struct pt_regs", and
* is still the layout used by user mode (the new
* pt_regs doesn't have all registers as the kernel
* doesn't use the extra segment registers)
*/
/* When the kernel dumps core, it starts by dumping the user struct -
this will be used by gdb to figure out where the data and stack segments
are within the file, and what virtual addresses to use. */
struct user {
/* We start with the registers, to mimic the way that "memory" is returned
from the ptrace(3,...) function. */
struct user_regs_struct regs; /* Where the registers are actually stored */
/* The rest of this junk is to help gdb figure out what goes where */
unsigned long int u_tsize; /* Text segment size (pages). */
unsigned long int u_dsize; /* Data segment size (pages). */
unsigned long int u_ssize; /* Stack segment size (pages). */
unsigned long start_code; /* Starting virtual address of text. */
unsigned long start_stack; /* Starting virtual address of stack area.
This is actually the bottom of the stack,
the top of the stack is always found in the
esp register. */
long int signal; /* Signal that caused the core dump. */
struct user_regs_struct *u_ar0;
/* Used by gdb to help find the values for */
/* the registers. */
unsigned long magic; /* To uniquely identify a core file */
char u_comm[32]; /* User command that was responsible */
};
#define NBPG PAGE_SIZE
#define UPAGES 1
#define HOST_TEXT_START_ADDR (u.start_code)
#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
#endif /* _S390_USER_H */

View File

@@ -0,0 +1,372 @@
#ifndef __KERNEL__
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <ctype.h>
#include <time.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/hdreg.h>
#include <asm/dasd.h>
#endif
#define LINE_LENGTH 80
#define VTOC_START_CC 0x0
#define VTOC_START_HH 0x1
#define FIRST_USABLE_CYL 1
#define FIRST_USABLE_TRK 2
#define DASD_3380_TYPE 13148
#define DASD_3390_TYPE 13200
#define DASD_9345_TYPE 37701
#define DASD_3380_VALUE 0xbb60
#define DASD_3390_VALUE 0xe5a2
#define DASD_9345_VALUE 0xbc98
#define VOLSER_LENGTH 6
#define BIG_DISK_SIZE 0x10000
#define VTOC_ERROR "VTOC error:"
/* TTR disk address: relative track number (tt) plus record number on
 * that track (r) — field semantics presumed from IBM DASD addressing,
 * TODO confirm against the VTOC format documentation. */
typedef struct ttr
{
	__u16 tt;
	__u8 r;
} __attribute__ ((packed)) ttr_t;
/* CCHHB disk address: cylinder (cc), head/track (hh) and block (b). */
typedef struct cchhb
{
	__u16 cc;
	__u16 hh;
	__u8 b;
} __attribute__ ((packed)) cchhb_t;
/* CCHH disk address: cylinder and head/track, without a block number. */
typedef struct cchh
{
	__u16 cc;
	__u16 hh;
} __attribute__ ((packed)) cchh_t;
/* VTOC label date: year plus day-of-year ("ydd", see the DS1CREDT
 * field comment in format1_label below). */
typedef struct labeldate
{
	__u8 year;
	__u16 day;
} __attribute__ ((packed)) labeldate_t;
/* On-disk DASD volume label.  Identifies the volume and locates the
 * VTOC via the vtoc (cchhb_t) field.  Packed: the struct must mirror
 * the on-disk record layout byte-for-byte. */
typedef struct volume_label
{
	char volkey[4];		/* volume key = volume label */
	char vollbl[4];		/* volume label */
	char volid[6];		/* volume identifier */
	__u8 security;		/* security byte */
	cchhb_t vtoc;		/* VTOC address */
	char res1[5];		/* reserved */
	char cisize[4];		/* CI-size for FBA,... */
				/* ...blanks for CKD */
	char blkperci[4];	/* no of blocks per CI (FBA), blanks for CKD */
	char labperci[4];	/* no of labels per CI (FBA), blanks for CKD */
	char res2[4];		/* reserved */
	char lvtoc[14];		/* owner code for LVTOC */
	char res3[29];		/* reserved */
} __attribute__ ((packed)) volume_label_t;
typedef struct extent
{
__u8 typeind; /* extent type indicator */
__u8 seqno; /* extent sequence number */
cchh_t llimit; /* starting point of this extent */
cchh_t ulimit; /* ending point of this extent */
} __attribute__ ((packed)) extent_t;
typedef struct dev_const
{
__u16 DS4DSCYL; /* number of logical cyls */
__u16 DS4DSTRK; /* number of tracks in a logical cylinder */
__u16 DS4DEVTK; /* device track length */
__u8 DS4DEVI; /* non-last keyed record overhead */
__u8 DS4DEVL; /* last keyed record overhead */
__u8 DS4DEVK; /* non-keyed record overhead differential */
__u8 DS4DEVFG; /* flag byte */
__u16 DS4DEVTL; /* device tolerance */
__u8 DS4DEVDT; /* number of DSCB's per track */
__u8 DS4DEVDB; /* number of directory blocks per track */
} __attribute__ ((packed)) dev_const_t;
/*
 * format1_label_t - format-1 DSCB (Data Set Control Block): describes
 * one data set on the volume, including its attributes and up to three
 * extents; DS1PTRDS chains to a format-2/3 DSCB when more extents exist.
 * Packed to match the on-disk layout exactly.
 */
typedef struct format1_label
{
char DS1DSNAM[44]; /* data set name */
__u8 DS1FMTID; /* format identifier */
char DS1DSSN[6]; /* data set serial number */
__u16 DS1VOLSQ; /* volume sequence number */
labeldate_t DS1CREDT; /* creation date: ydd */
labeldate_t DS1EXPDT; /* expiration date */
__u8 DS1NOEPV; /* number of extents on volume */
__u8 DS1NOBDB; /* no. of bytes used in last directory blk */
__u8 DS1FLAG1; /* flag 1 */
char DS1SYSCD[13]; /* system code */
labeldate_t DS1REFD; /* date last referenced */
__u8 DS1SMSFG; /* system managed storage indicators */
__u8 DS1SCXTF; /* sec. space extension flag byte */
__u16 DS1SCXTV; /* secondary space extension value */
__u8 DS1DSRG1; /* data set organisation byte 1 */
__u8 DS1DSRG2; /* data set organisation byte 2 */
__u8 DS1RECFM; /* record format */
__u8 DS1OPTCD; /* option code */
__u16 DS1BLKL; /* block length */
__u16 DS1LRECL; /* record length */
__u8 DS1KEYL; /* key length */
__u16 DS1RKP; /* relative key position */
__u8 DS1DSIND; /* data set indicators */
__u8 DS1SCAL1; /* secondary allocation flag byte */
char DS1SCAL3[3]; /* secondary allocation quantity */
ttr_t DS1LSTAR; /* last used track and block on track */
__u16 DS1TRBAL; /* space remaining on last used track */
__u16 res1; /* reserved */
extent_t DS1EXT1; /* first extent description */
extent_t DS1EXT2; /* second extent description */
extent_t DS1EXT3; /* third extent description */
cchhb_t DS1PTRDS; /* possible pointer to f2 or f3 DSCB */
} __attribute__ ((packed)) format1_label_t;
/*
 * format4_label_t - format-4 DSCB: the first record of the VTOC,
 * describing the VTOC itself (its extent, free-DSCB count, device
 * constants) rather than a user data set. Packed to match the
 * on-disk layout exactly.
 */
typedef struct format4_label
{
char DS4KEYCD[44]; /* key code for VTOC labels: 44 times 0x04 */
__u8 DS4IDFMT; /* format identifier */
cchhb_t DS4HPCHR; /* highest address of a format 1 DSCB */
__u16 DS4DSREC; /* number of available DSCB's */
cchh_t DS4HCCHH; /* CCHH of next available alternate track */
__u16 DS4NOATK; /* number of remaining alternate tracks */
__u8 DS4VTOCI; /* VTOC indicators */
__u8 DS4NOEXT; /* number of extents in VTOC */
__u8 DS4SMSFG; /* system managed storage indicators */
__u8 DS4DEVAC; /* number of alternate cylinders.
Subtract from first two bytes of
DS4DEVSZ to get number of usable
cylinders. can be zero. valid
only if DS4DEVAV on. */
dev_const_t DS4DEVCT; /* device constants */
char DS4AMTIM[8]; /* VSAM time stamp */
char DS4AMCAT[3]; /* VSAM catalog indicator */
char DS4R2TIM[8]; /* VSAM volume/catalog match time stamp */
char res1[5]; /* reserved */
char DS4F6PTR[5]; /* pointer to first format 6 DSCB */
extent_t DS4VTOCE; /* VTOC extent description */
char res2[10]; /* reserved */
__u8 DS4EFLVL; /* extended free-space management level */
cchhb_t DS4EFPTR; /* pointer to extended free-space info */
char res3[9]; /* reserved */
} __attribute__ ((packed)) format4_label_t;
/*
 * ds5ext_t - free-space extent descriptor used inside format-5 DSCBs;
 * addresses free space as an RTA (relative track address) plus whole
 * cylinders and leftover tracks. Packed to match the on-disk layout.
 */
typedef struct ds5ext
{
__u16 t; /* RTA of the first track of free extent */
__u16 fc; /* number of whole cylinders in free ext. */
__u8 ft; /* number of remaining free tracks */
} __attribute__ ((packed)) ds5ext_t;
/*
 * format5_label_t - format-5 DSCB: free-space map of the volume,
 * holding up to 26 ds5ext_t entries (1 + 7 + 18) and a chain pointer
 * to the next format-5 DSCB. Packed to match the on-disk layout.
 */
typedef struct format5_label
{
char DS5KEYID[4]; /* key identifier */
ds5ext_t DS5AVEXT; /* first available (free-space) extent. */
ds5ext_t DS5EXTAV[7]; /* seven available extents */
__u8 DS5FMTID; /* format identifier */
ds5ext_t DS5MAVET[18]; /* eighteen available extents */
cchhb_t DS5PTRDS; /* pointer to next format5 DSCB */
} __attribute__ ((packed)) format5_label_t;
/*
 * ds7ext_t - free-space extent descriptor used inside format-7 DSCBs;
 * a half-open range of 32-bit RTA values [a, b). Packed to match the
 * on-disk layout.
 */
typedef struct ds7ext
{
__u32 a; /* starting RTA value */
__u32 b; /* ending RTA value + 1 */
} __attribute__ ((packed)) ds7ext_t;
/*
 * format7_label_t - format-7 DSCB: extended free-space map using
 * 32-bit RTA ranges (up to 16 ds7ext_t entries: 5 + 11), chained via
 * DS7PTRDS. Packed to match the on-disk layout.
 */
typedef struct format7_label
{
char DS7KEYID[4]; /* key identifier */
ds7ext_t DS7EXTNT[5]; /* space for 5 extent descriptions */
__u8 DS7FMTID; /* format identifier */
ds7ext_t DS7ADEXT[11]; /* space for 11 extent descriptions */
char res1[2]; /* reserved */
cchhb_t DS7PTRDS; /* pointer to next FMT7 DSCB */
} __attribute__ ((packed)) format7_label_t;
/*
 * Prototypes of the VTOC helper routines: EBCDIC string conversion
 * and creation/reading/writing/updating of DASD volume and VTOC
 * labels on a device.
 *
 * NOTE(review): the one-line summaries below are inferred from names
 * and parameter lists only -- confirm against the implementation
 * (including return-value conventions of the int-returning functions).
 */

/* Encode 'l' bytes from 'source' into 'target' as EBCDIC. */
char * vtoc_ebcdic_enc (
unsigned char source[LINE_LENGTH],
unsigned char target[LINE_LENGTH],
int l);
/* Decode 'l' EBCDIC bytes from 'source' into 'target'. */
char * vtoc_ebcdic_dec (
unsigned char source[LINE_LENGTH],
unsigned char target[LINE_LENGTH],
int l);
/* Fill an extent descriptor with type, sequence number and limits. */
void vtoc_set_extent (
extent_t * ext,
__u8 typeind,
__u8 seqno,
cchh_t * lower,
cchh_t * upper);
/* Fill a cylinder/head (cchh_t) address. */
void vtoc_set_cchh (
cchh_t * addr,
__u16 cc,
__u16 hh);
/* Fill a cylinder/head/block (cchhb_t) address. */
void vtoc_set_cchhb (
cchhb_t * addr,
__u16 cc,
__u16 hh,
__u8 b);
/* Fill a label date (year plus day-of-year). */
void vtoc_set_date (
labeldate_t * d,
__u8 year,
__u16 day);
/* Initialize a volume label to default contents. */
void vtoc_volume_label_init (
volume_label_t *vlabel);
/* Read the volume label located at 'vlabel_start' on 'device'. */
int vtoc_read_volume_label (
char * device,
unsigned long vlabel_start,
volume_label_t * vlabel);
/* Write the volume label to 'vlabel_start' on 'device'. */
int vtoc_write_volume_label (
char *device,
unsigned long vlabel_start,
volume_label_t *vlabel);
/* Store the volume serial 'volser' into the volume label. */
void vtoc_volume_label_set_volser (
volume_label_t *vlabel,
char *volser);
/* Extract the volume serial from the volume label into 'volser'. */
char *vtoc_volume_label_get_volser (
volume_label_t *vlabel,
char *volser);
/* Store the label key field from 'key'. */
void vtoc_volume_label_set_key (
volume_label_t *vlabel,
char *key);
/* Store the label identifier field from 'lbl'. */
void vtoc_volume_label_set_label (
volume_label_t *vlabel,
char *lbl);
/* Extract the label identifier field into 'lbl'. */
char *vtoc_volume_label_get_label (
volume_label_t *vlabel,
char *lbl);
/* Read one DSCB at 'position'; the f1/f4/f5/f7 pointer selects the
   record type expected -- presumably only one is used per call. */
void vtoc_read_label (
char *device,
unsigned long position,
format1_label_t *f1,
format4_label_t *f4,
format5_label_t *f5,
format7_label_t *f7);
/* Write one DSCB at 'position'; mirrors vtoc_read_label. */
void vtoc_write_label (
char *device,
unsigned long position,
format1_label_t *f1,
format4_label_t *f4,
format5_label_t *f5,
format7_label_t *f7);
/* Initialize a format-1 DSCB for a partition covering 'part_extent'. */
void vtoc_init_format1_label (
char *volid,
unsigned int blksize,
extent_t *part_extent,
format1_label_t *f1);
/* Initialize a format-4 DSCB from the device geometry. */
void vtoc_init_format4_label (
format4_label_t *f4lbl,
unsigned int usable_partitions,
unsigned int cylinders,
unsigned int tracks,
unsigned int blocks,
unsigned int blksize,
__u16 dev_type);
/* Update a format-4 DSCB (e.g. highest format-1 pointer). */
void vtoc_update_format4_label (
format4_label_t *f4,
cchhb_t *highest_f1,
__u16 unused_update);
/* Initialize an empty format-5 (free-space) DSCB. */
void vtoc_init_format5_label (
format5_label_t *f5);
/* Add a free-space extent to a format-5 DSCB. */
void vtoc_update_format5_label_add (
format5_label_t *f5,
int verbose,
int cyl,
int trk,
__u16 a,
__u16 b,
__u8 c);
/* Remove a free-space extent from a format-5 DSCB. */
void vtoc_update_format5_label_del (
format5_label_t *f5,
int verbose,
int cyl,
int trk,
__u16 a,
__u16 b,
__u8 c);
/* Initialize an empty format-7 (extended free-space) DSCB. */
void vtoc_init_format7_label (
format7_label_t *f7);
/* Add an RTA range [a, b) to a format-7 DSCB. */
void vtoc_update_format7_label_add (
format7_label_t *f7,
int verbose,
__u32 a,
__u32 b);
/* Remove an RTA range [a, b) from a format-7 DSCB. */
void vtoc_update_format7_label_del (
format7_label_t *f7,
int verbose,
__u32 a,
__u32 b);
/* Mark the range [start, stop] as free ('ch' presumably selects
   add/delete) across the f4/f5/f7 free-space structures. */
void vtoc_set_freespace(
format4_label_t *f4,
format5_label_t *f5,
format7_label_t *f7,
char ch,
int verbose,
__u32 start,
__u32 stop,
int cyl,
int trk);

Some files were not shown because too many files have changed in this diff Show More