(2006-08-06) rescue-bootcd

2006-08-06 00:00:00 +02:00
parent 2f796b816a
commit decb062d20
21091 changed files with 7076462 additions and 0 deletions


@@ -0,0 +1,10 @@
#
# Makefile for the S/390 common i/o drivers
#
obj-y += airq.o blacklist.o chsc.o cio.o css.o
ccw_device-objs += device.o device_fsm.o device_ops.o
ccw_device-objs += device_id.o device_pgid.o device_status.o
obj-y += ccw_device.o cmf.o
obj-$(CONFIG_CCWGROUP) += ccwgroup.o
obj-$(CONFIG_QDIO) += qdio.o


@@ -0,0 +1,87 @@
/*
* drivers/s390/cio/airq.c
* S/390 common I/O routines -- support for adapter interruptions
*
* $Revision: 1.12 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cohuck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "cio_debug.h"
#include "airq.h"
static adapter_int_handler_t adapter_handler;
/*
* register for adapter interrupts
*
 * With HiperSockets, the zSeries architecture provides a means of
 * adapter interrupts: pseudo I/O interrupts that are tied not to an
 * I/O subchannel but to an adapter. However, the architecture only
 * describes how to recognize them, not how to enable or disable them.
 * Perhaps we should treat them as shared interrupts and thus build a
 * linked list of adapter handlers ... to be evaluated ...
*/
int
s390_register_adapter_interrupt (adapter_int_handler_t handler)
{
int ret;
char dbf_txt[15];
CIO_TRACE_EVENT (4, "rgaint");
if (handler == NULL)
ret = -EINVAL;
else
ret = (cmpxchg(&adapter_handler, NULL, handler) ? -EBUSY : 0);
if (!ret)
synchronize_kernel();
sprintf (dbf_txt, "ret:%d", ret);
CIO_TRACE_EVENT (4, dbf_txt);
return ret;
}
int
s390_unregister_adapter_interrupt (adapter_int_handler_t handler)
{
int ret;
char dbf_txt[15];
CIO_TRACE_EVENT (4, "urgaint");
if (handler == NULL)
ret = -EINVAL;
else {
adapter_handler = NULL;
synchronize_kernel();
ret = 0;
}
sprintf (dbf_txt, "ret:%d", ret);
CIO_TRACE_EVENT (4, dbf_txt);
return ret;
}
void
do_adapter_IO (void)
{
CIO_TRACE_EVENT (6, "doaio");
if (adapter_handler)
(*adapter_handler) ();
}
EXPORT_SYMBOL (s390_register_adapter_interrupt);
EXPORT_SYMBOL (s390_unregister_adapter_interrupt);
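/*
 * Usage sketch (illustrative; the handler and tasklet names are
 * hypothetical). Only one handler can be registered at a time, and it
 * is invoked from do_adapter_IO() in interrupt context, so it must
 * not sleep:
 *
 *	static int my_thin_int_handler(void)
 *	{
 *		tasklet_schedule(&my_tasklet);
 *		return 0;
 *	}
 *
 *	rc = s390_register_adapter_interrupt(my_thin_int_handler);
 *	if (!rc) {
 *		...
 *		s390_unregister_adapter_interrupt(my_thin_int_handler);
 *	}
 */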


@@ -0,0 +1,10 @@
#ifndef S390_AINTERRUPT_H
#define S390_AINTERRUPT_H
typedef int (*adapter_int_handler_t)(void);
extern int s390_register_adapter_interrupt(adapter_int_handler_t handler);
extern int s390_unregister_adapter_interrupt(adapter_int_handler_t handler);
extern void do_adapter_IO (void);
#endif


@@ -0,0 +1,351 @@
/*
* drivers/s390/cio/blacklist.c
* S/390 common I/O routines -- blacklisting of specific devices
* $Revision: 1.33 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cohuck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <asm/cio.h>
#include <asm/uaccess.h>
#include "blacklist.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
/*
* "Blacklisting" of certain devices:
* Device numbers given in the commandline as cio_ignore=... won't be known
* to Linux.
*
* These can be single devices or ranges of devices
*/
/* 65536 bits to indicate if a devno is blacklisted or not */
#define __BL_DEV_WORDS ((__MAX_SUBCHANNELS + (8*sizeof(long) - 1)) / \
			(8*sizeof(long)))
static unsigned long bl_dev[__BL_DEV_WORDS];
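/*
 * Size example (illustrative): with __MAX_SUBCHANNELS == 65536 the
 * bitmap above needs 65536/32 == 2048 longs on 31 bit and
 * 65536/64 == 1024 longs on 64 bit.
 */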
typedef enum {add, free} range_action;
/*
* Function: blacklist_range
* (Un-)blacklist the devices from-to
*/
static inline void
blacklist_range (range_action action, unsigned int from, unsigned int to)
{
if (!to)
to = from;
if (from > to || to > __MAX_SUBCHANNELS) {
printk (KERN_WARNING "Invalid blacklist range "
"0x%04x to 0x%04x, skipping\n", from, to);
return;
}
for (; from <= to; from++) {
if (action == add)
set_bit (from, bl_dev);
else
clear_bit (from, bl_dev);
}
}
/*
* Function: blacklist_busid
* Get devno/busid from given string.
* Shamelessly grabbed from dasd_devmap.c.
*/
static inline int
blacklist_busid(char **str, int *id0, int *id1, int *devno)
{
int val, old_style;
char *sav;
sav = *str;
/* check for leading '0x' */
old_style = 0;
if ((*str)[0] == '0' && (*str)[1] == 'x') {
*str += 2;
old_style = 1;
}
if (!isxdigit((*str)[0])) /* We require at least one hex digit */
goto confused;
val = simple_strtoul(*str, str, 16);
if (old_style || (*str)[0] != '.') {
*id0 = *id1 = 0;
if (val < 0 || val > 0xffff)
goto confused;
*devno = val;
if ((*str)[0] != ',' && (*str)[0] != '-' &&
(*str)[0] != '\n' && (*str)[0] != '\0')
goto confused;
return 0;
}
/* New style x.y.z busid */
if (val < 0 || val > 0xff)
goto confused;
*id0 = val;
(*str)++;
if (!isxdigit((*str)[0])) /* We require at least one hex digit */
goto confused;
val = simple_strtoul(*str, str, 16);
if (val < 0 || val > 0xff || (*str)++[0] != '.')
goto confused;
*id1 = val;
if (!isxdigit((*str)[0])) /* We require at least one hex digit */
goto confused;
val = simple_strtoul(*str, str, 16);
if (val < 0 || val > 0xffff)
goto confused;
*devno = val;
if ((*str)[0] != ',' && (*str)[0] != '-' &&
(*str)[0] != '\n' && (*str)[0] != '\0')
goto confused;
return 0;
confused:
strsep(str, ",\n");
printk(KERN_WARNING "Invalid cio_ignore parameter '%s'\n", sav);
return 1;
}
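/*
 * Worked examples (illustrative) of strings blacklist_busid() accepts:
 *	"0x1234"	old style; id0 = id1 = 0, devno = 0x1234
 *	"1234"		likewise, the "0x" prefix is optional
 *	"0.0.1234"	new style css.ssid.devno bus id
 * Anything else ends up at the "confused" label and is skipped.
 */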
static inline int
blacklist_parse_parameters (char *str, range_action action)
{
unsigned int from, to, from_id0, to_id0, from_id1, to_id1;
while (*str != 0 && *str != '\n') {
range_action ra = action;
while(*str == ',')
str++;
if (*str == '!') {
ra = !action;
++str;
}
/*
* Since we have to parse the proc commands and the
* kernel arguments we have to check four cases
*/
if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 ||
strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) {
from = 0;
to = __MAX_SUBCHANNELS;
str += 3;
} else {
int rc;
rc = blacklist_busid(&str, &from_id0,
&from_id1, &from);
if (rc)
continue;
to = from;
to_id0 = from_id0;
to_id1 = from_id1;
if (*str == '-') {
str++;
rc = blacklist_busid(&str, &to_id0,
&to_id1, &to);
if (rc)
continue;
}
if (*str == '-') {
printk(KERN_WARNING "invalid cio_ignore "
"parameter '%s'\n",
strsep(&str, ",\n"));
continue;
}
if ((from_id0 != to_id0) || (from_id1 != to_id1)) {
printk(KERN_WARNING "invalid cio_ignore range "
"%x.%x.%04x-%x.%x.%04x\n",
from_id0, from_id1, from,
to_id0, to_id1, to);
continue;
}
}
/* FIXME: ignoring id0 and id1 here. */
pr_debug("blacklist_setup: adding range "
"from 0.0.%04x to 0.0.%04x\n", from, to);
blacklist_range (ra, from, to);
}
return 1;
}
/* Parsing the commandline for blacklist parameters, e.g. to blacklist
* bus ids 0.0.1234, 0.0.1235 and 0.0.1236, you could use any of:
* - cio_ignore=1234-1236
* - cio_ignore=0x1234-0x1235,1236
* - cio_ignore=0x1234,1235-1236
* - cio_ignore=1236 cio_ignore=1234-0x1236
* - cio_ignore=1234 cio_ignore=1236 cio_ignore=0x1235
* - cio_ignore=0.0.1234-0.0.1236
* - cio_ignore=0.0.1234,0x1235,1236
* - ...
*/
static int __init
blacklist_setup (char *str)
{
CIO_MSG_EVENT(6, "Reading blacklist parameters\n");
return blacklist_parse_parameters (str, add);
}
__setup ("cio_ignore=", blacklist_setup);
/* Checking if devices are blacklisted */
/*
* Function: is_blacklisted
 * Returns 1 if the given device number can be found in the blacklist,
* otherwise 0.
* Used by validate_subchannel()
*/
int
is_blacklisted (int devno)
{
return test_bit (devno, bl_dev);
}
#ifdef CONFIG_PROC_FS
/*
* Function: s390_redo_validation
* Look for no longer blacklisted devices
* FIXME: there must be a better way to do this */
static inline void
s390_redo_validation (void)
{
unsigned int irq;
CIO_TRACE_EVENT (0, "redoval");
for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
int ret;
struct subchannel *sch;
sch = get_subchannel_by_schid(irq);
if (sch) {
/* Already known. */
put_device(&sch->dev);
continue;
}
ret = css_probe_device(irq);
if (ret == -ENXIO)
break; /* We're through. */
if (ret == -ENOMEM)
/*
* Stop validation for now. Bad, but no need for a
* panic.
*/
break;
}
}
/*
* Function: blacklist_parse_proc_parameters
* parse the stuff which is piped to /proc/cio_ignore
*/
static inline void
blacklist_parse_proc_parameters (char *buf)
{
if (strncmp (buf, "free ", 5) == 0) {
blacklist_parse_parameters (buf + 5, free);
} else if (strncmp (buf, "add ", 4) == 0) {
/*
* We don't need to check for known devices since
* css_probe_device will handle this correctly.
*/
blacklist_parse_parameters (buf + 4, add);
} else {
printk (KERN_WARNING "cio_ignore: Parse error; \n"
KERN_WARNING "try using 'free all|<devno-range>,"
"<devno-range>,...'\n"
KERN_WARNING "or 'add <devno-range>,"
"<devno-range>,...'\n");
return;
}
s390_redo_validation ();
}
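/*
 * Example input (illustrative), matching the "add "/"free " prefixes
 * parsed above:
 *	echo "add 0.0.0021,0xb100-0xb1ff" > /proc/cio_ignore
 *	echo "free 0xb100-0xb1ff" > /proc/cio_ignore
 */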
/* FIXME: These should be real bus ids and not home-grown ones! */
static int cio_ignore_read (char *page, char **start, off_t off,
int count, int *eof, void *data)
{
const unsigned int entry_size = 18; /* "0.0.ABCD-0.0.EFGH\n" */
long devno;
int len;
len = 0;
for (devno = off; /* abuse the page variable
* as counter, see fs/proc/generic.c */
devno <= __MAX_SUBCHANNELS && len + entry_size < count; devno++) {
if (!test_bit(devno, bl_dev))
continue;
len += sprintf(page + len, "0.0.%04lx", devno);
if (test_bit(devno + 1, bl_dev)) { /* print range */
while (++devno < __MAX_SUBCHANNELS)
if (!test_bit(devno, bl_dev))
break;
len += sprintf(page + len, "-0.0.%04lx", --devno);
}
len += sprintf(page + len, "\n");
}
if (devno <= __MAX_SUBCHANNELS)
*eof = 1;
*start = (char *) (devno - off); /* number of checked entries */
return len;
}
static int cio_ignore_write(struct file *file, const char __user *user_buf,
unsigned long user_len, void *data)
{
char *buf;
if (user_len > 65536)
user_len = 65536;
buf = vmalloc (user_len + 1); /* maybe better use the stack? */
if (buf == NULL)
return -ENOMEM;
if (strncpy_from_user (buf, user_buf, user_len) < 0) {
vfree (buf);
return -EFAULT;
}
buf[user_len] = '\0';
blacklist_parse_proc_parameters (buf);
vfree (buf);
return user_len;
}
static int
cio_ignore_proc_init (void)
{
struct proc_dir_entry *entry;
entry = create_proc_entry ("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR,
&proc_root);
if (!entry)
return 0;
entry->read_proc = cio_ignore_read;
entry->write_proc = cio_ignore_write;
return 1;
}
__initcall (cio_ignore_proc_init);
#endif /* CONFIG_PROC_FS */


@@ -0,0 +1,6 @@
#ifndef S390_BLACKLIST_H
#define S390_BLACKLIST_H
extern int is_blacklisted (int devno);
#endif


@@ -0,0 +1,482 @@
/*
* drivers/s390/cio/ccwgroup.c
* bus driver for ccwgroup
* $Revision: 1.29 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cohuck@de.ibm.com)
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/dcache.h>
#include <asm/semaphore.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
/* In Linux 2.4, we had a channel device layer called "chandev"
* that did all sorts of obscure stuff for networking devices.
* This is another driver that serves as a replacement for just
* one of its functions, namely the translation of single subchannels
* to devices that use multiple subchannels.
*/
/* a device matches a driver if all its slave devices match the same
* entry of the driver */
static int
ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
{
struct ccwgroup_device *gdev;
struct ccwgroup_driver *gdrv;
gdev = container_of(dev, struct ccwgroup_device, dev);
gdrv = container_of(drv, struct ccwgroup_driver, driver);
if (gdev->creator_id == gdrv->driver_id)
return 1;
return 0;
}
static int
ccwgroup_hotplug (struct device *dev, char **envp, int num_envp, char *buffer,
int buffer_size)
{
/* TODO */
return 0;
}
static struct bus_type ccwgroup_bus_type = {
.name = "ccwgroup",
.match = ccwgroup_bus_match,
.hotplug = ccwgroup_hotplug,
};
static inline void
__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
{
int i;
char str[8];
for (i = 0; i < gdev->count; i++) {
sprintf(str, "cdev%d", i);
sysfs_remove_link(&gdev->dev.kobj, str);
sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
}
}
/*
* Provide an 'ungroup' attribute so the user can remove group devices no
 * longer needed or accidentally created. Saves memory :)
*/
static ssize_t
ccwgroup_ungroup_store(struct device *dev, const char *buf, size_t count)
{
struct ccwgroup_device *gdev;
gdev = to_ccwgroupdev(dev);
if (gdev->state != CCWGROUP_OFFLINE)
return -EINVAL;
__ccwgroup_remove_symlinks(gdev);
device_unregister(dev);
return count;
}
static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
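/*
 * From user space (illustrative path; the written value itself is not
 * interpreted, but the group device must be offline):
 *	echo 1 > /sys/bus/ccwgroup/devices/0.0.f500/ungroup
 */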
static void
ccwgroup_release (struct device *dev)
{
struct ccwgroup_device *gdev;
int i;
gdev = to_ccwgroupdev(dev);
for (i = 0; i < gdev->count; i++) {
gdev->cdev[i]->dev.driver_data = NULL;
put_device(&gdev->cdev[i]->dev);
}
kfree(gdev);
}
static inline int
__ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
{
char str[8];
int i, rc;
for (i = 0; i < gdev->count; i++) {
rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, &gdev->dev.kobj,
"group_device");
if (rc) {
for (--i; i >= 0; i--)
sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
"group_device");
return rc;
}
}
for (i = 0; i < gdev->count; i++) {
sprintf(str, "cdev%d", i);
rc = sysfs_create_link(&gdev->dev.kobj, &gdev->cdev[i]->dev.kobj,
str);
if (rc) {
for (--i; i >= 0; i--) {
sprintf(str, "cdev%d", i);
sysfs_remove_link(&gdev->dev.kobj, str);
}
for (i = 0; i < gdev->count; i++)
sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
"group_device");
return rc;
}
}
return 0;
}
/*
* try to add a new ccwgroup device for one driver
* argc and argv[] are a list of bus_id's of devices
* belonging to the driver.
*/
int
ccwgroup_create(struct device *root,
unsigned int creator_id,
struct ccw_driver *cdrv,
int argc, char *argv[])
{
struct ccwgroup_device *gdev;
int i;
int rc;
int del_drvdata;
if (argc > 256) /* disallow dumb users */
return -EINVAL;
gdev = kmalloc(sizeof(*gdev) + argc*sizeof(gdev->cdev[0]), GFP_KERNEL);
if (!gdev)
return -ENOMEM;
memset(gdev, 0, sizeof(*gdev) + argc*sizeof(gdev->cdev[0]));
atomic_set(&gdev->onoff, 0);
del_drvdata = 0;
for (i = 0; i < argc; i++) {
gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]);
/* all devices have to be of the same type in
* order to be grouped */
if (!gdev->cdev[i]
|| gdev->cdev[i]->id.driver_info !=
gdev->cdev[0]->id.driver_info) {
rc = -EINVAL;
goto free_dev;
}
/* Don't allow a device to belong to more than one group. */
if (gdev->cdev[i]->dev.driver_data) {
rc = -EINVAL;
goto free_dev;
}
}
for (i = 0; i < argc; i++)
gdev->cdev[i]->dev.driver_data = gdev;
del_drvdata = 1;
gdev->creator_id = creator_id;
gdev->count = argc;
gdev->dev = (struct device) {
.bus = &ccwgroup_bus_type,
.parent = root,
.release = ccwgroup_release,
};
snprintf (gdev->dev.bus_id, BUS_ID_SIZE, "%s",
gdev->cdev[0]->dev.bus_id);
rc = device_register(&gdev->dev);
if (rc)
goto free_dev;
get_device(&gdev->dev);
rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
if (rc) {
device_unregister(&gdev->dev);
goto error;
}
rc = __ccwgroup_create_symlinks(gdev);
if (!rc) {
put_device(&gdev->dev);
return 0;
}
device_remove_file(&gdev->dev, &dev_attr_ungroup);
device_unregister(&gdev->dev);
error:
for (i = 0; i < argc; i++)
if (gdev->cdev[i]) {
put_device(&gdev->cdev[i]->dev);
gdev->cdev[i]->dev.driver_data = NULL;
}
put_device(&gdev->dev);
return rc;
free_dev:
for (i = 0; i < argc; i++)
if (gdev->cdev[i]) {
put_device(&gdev->cdev[i]->dev);
if (del_drvdata)
gdev->cdev[i]->dev.driver_data = NULL;
}
kfree(gdev);
return rc;
}
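/*
 * Illustrative call (all names hypothetical): a networking driver
 * whose devices use three subchannels each could group them like
 * this, e.g. from a driver-specific sysfs attribute:
 *
 *	static char *bus_ids[] = { "0.0.f500", "0.0.f501", "0.0.f502" };
 *	int ret;
 *
 *	ret = ccwgroup_create(my_root_dev, MY_CREATOR_ID,
 *			      &my_ccw_driver, 3, bus_ids);
 *
 * On success, a group device named after the first slave ("0.0.f500")
 * appears on the ccwgroup bus and is matched against ccwgroup drivers
 * with the same driver_id.
 */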
static int __init
init_ccwgroup (void)
{
return bus_register (&ccwgroup_bus_type);
}
static void __exit
cleanup_ccwgroup (void)
{
bus_unregister (&ccwgroup_bus_type);
}
module_init(init_ccwgroup);
module_exit(cleanup_ccwgroup);
/************************** driver stuff ******************************/
static int
ccwgroup_set_online(struct ccwgroup_device *gdev)
{
struct ccwgroup_driver *gdrv;
int ret;
if (atomic_compare_and_swap(0, 1, &gdev->onoff))
return -EAGAIN;
if (gdev->state == CCWGROUP_ONLINE) {
ret = 0;
goto out;
}
if (!gdev->dev.driver) {
ret = -EINVAL;
goto out;
}
gdrv = to_ccwgroupdrv (gdev->dev.driver);
if ((ret = gdrv->set_online(gdev)))
goto out;
gdev->state = CCWGROUP_ONLINE;
out:
atomic_set(&gdev->onoff, 0);
return ret;
}
static int
ccwgroup_set_offline(struct ccwgroup_device *gdev)
{
struct ccwgroup_driver *gdrv;
int ret;
if (atomic_compare_and_swap(0, 1, &gdev->onoff))
return -EAGAIN;
if (gdev->state == CCWGROUP_OFFLINE) {
ret = 0;
goto out;
}
if (!gdev->dev.driver) {
ret = -EINVAL;
goto out;
}
gdrv = to_ccwgroupdrv (gdev->dev.driver);
if ((ret = gdrv->set_offline(gdev)))
goto out;
gdev->state = CCWGROUP_OFFLINE;
out:
atomic_set(&gdev->onoff, 0);
return ret;
}
static ssize_t
ccwgroup_online_store (struct device *dev, const char *buf, size_t count)
{
struct ccwgroup_device *gdev;
struct ccwgroup_driver *gdrv;
unsigned int value;
int ret;
gdev = to_ccwgroupdev(dev);
if (!dev->driver)
return count;
gdrv = to_ccwgroupdrv (gdev->dev.driver);
if (!try_module_get(gdrv->owner))
return -EINVAL;
value = simple_strtoul(buf, 0, 0);
ret = count;
if (value == 1)
ccwgroup_set_online(gdev);
else if (value == 0)
ccwgroup_set_offline(gdev);
else
ret = -EINVAL;
module_put(gdrv->owner);
return ret;
}
static ssize_t
ccwgroup_online_show (struct device *dev, char *buf)
{
int online;
online = (to_ccwgroupdev(dev)->state == CCWGROUP_ONLINE);
return sprintf(buf, online ? "1\n" : "0\n");
}
static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
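/*
 * From user space (illustrative path):
 *	echo 1 > /sys/bus/ccwgroup/devices/0.0.f500/online
 *	echo 0 > /sys/bus/ccwgroup/devices/0.0.f500/online
 * ends up in ccwgroup_set_online()/ccwgroup_set_offline() via the
 * store function above.
 */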
static int
ccwgroup_probe (struct device *dev)
{
struct ccwgroup_device *gdev;
struct ccwgroup_driver *gdrv;
int ret;
gdev = to_ccwgroupdev(dev);
gdrv = to_ccwgroupdrv(dev->driver);
if ((ret = device_create_file(dev, &dev_attr_online)))
return ret;
pr_debug("%s: device %s\n", __func__, gdev->dev.bus_id);
ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
if (ret)
device_remove_file(dev, &dev_attr_online);
return ret;
}
static int
ccwgroup_remove (struct device *dev)
{
struct ccwgroup_device *gdev;
struct ccwgroup_driver *gdrv;
gdev = to_ccwgroupdev(dev);
gdrv = to_ccwgroupdrv(dev->driver);
pr_debug("%s: device %s\n", __func__, gdev->dev.bus_id);
device_remove_file(dev, &dev_attr_online);
if (gdrv && gdrv->remove)
gdrv->remove(gdev);
return 0;
}
int
ccwgroup_driver_register (struct ccwgroup_driver *cdriver)
{
/* register our new driver with the core */
cdriver->driver = (struct device_driver) {
.bus = &ccwgroup_bus_type,
.name = cdriver->name,
.probe = ccwgroup_probe,
.remove = ccwgroup_remove,
};
return driver_register(&cdriver->driver);
}
static inline struct device *
__get_next_ccwgroup_device(struct device_driver *drv)
{
struct device *dev, *d;
down_read(&drv->bus->subsys.rwsem);
dev = NULL;
list_for_each_entry(d, &drv->devices, driver_list) {
dev = get_device(d);
if (dev)
break;
}
up_read(&drv->bus->subsys.rwsem);
return dev;
}
void
ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
{
struct device *dev;
/* We don't want ccwgroup devices to live longer than their driver. */
get_driver(&cdriver->driver);
while ((dev = __get_next_ccwgroup_device(&cdriver->driver))) {
__ccwgroup_remove_symlinks(to_ccwgroupdev(dev));
device_unregister(dev);
put_device(dev);
};
put_driver(&cdriver->driver);
driver_unregister(&cdriver->driver);
}
int
ccwgroup_probe_ccwdev(struct ccw_device *cdev)
{
return 0;
}
static inline struct ccwgroup_device *
__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
{
struct ccwgroup_device *gdev;
if (cdev->dev.driver_data) {
gdev = (struct ccwgroup_device *)cdev->dev.driver_data;
if (get_device(&gdev->dev)) {
if (!list_empty(&gdev->dev.node))
return gdev;
put_device(&gdev->dev);
}
return NULL;
}
return NULL;
}
void
ccwgroup_remove_ccwdev(struct ccw_device *cdev)
{
struct ccwgroup_device *gdev;
/* Ignore offlining errors, device is gone anyway. */
ccw_device_set_offline(cdev);
/* If one of its devices is gone, the whole group is done for. */
gdev = __ccwgroup_get_gdev_by_cdev(cdev);
if (gdev) {
__ccwgroup_remove_symlinks(gdev);
device_unregister(&gdev->dev);
put_device(&gdev->dev);
}
}
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccwgroup_driver_register);
EXPORT_SYMBOL(ccwgroup_driver_unregister);
EXPORT_SYMBOL(ccwgroup_create);
EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
EXPORT_SYMBOL(ccwgroup_remove_ccwdev);

File diff suppressed because it is too large


@@ -0,0 +1,54 @@
#ifndef S390_CHSC_H
#define S390_CHSC_H
#define NR_CHPIDS 256
#define CHSC_SEI_ACC_CHPID 1
#define CHSC_SEI_ACC_LINKADDR 2
#define CHSC_SEI_ACC_FULLLINKADDR 3
struct chsc_header {
u16 length;
u16 code;
};
struct channel_path {
int id;
int state;
struct device dev;
};
extern struct channel_path *chps[];
extern void s390_process_css( void );
extern void chsc_validate_chpids(struct subchannel *);
extern void chpid_is_actually_online(int);
struct css_general_char {
u64 : 41;
u32 aif : 1; /* bit 41 */
u32 : 3;
u32 mcss : 1; /* bit 45 */
u32 : 2;
u32 ext_mb : 1; /* bit 48 */
u32 : 7;
u32 aif_tdd : 1; /* bit 56 */
u32 : 10;
u32 aif_osa : 1; /* bit 67 */
u32 : 28;
}__attribute__((packed));
struct css_chsc_char {
u64 res;
u64 : 43;
u32 scssc : 1; /* bit 107 */
u32 scsscf : 1; /* bit 108 */
u32 : 19;
}__attribute__((packed));
extern struct css_general_char css_general_characteristics;
extern struct css_chsc_char css_chsc_characteristics;
extern int chsc_determine_css_characteristics(void);
extern int css_characteristics_avail;
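/*
 * Example check (illustrative): once chsc_determine_css_characteristics()
 * has run successfully, single facility bits can be tested directly:
 *
 *	if (css_characteristics_avail &&
 *	    css_general_characteristics.aif)
 *		... adapter interruption facility is installed ...
 */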
#endif


@@ -0,0 +1,842 @@
/*
* drivers/s390/cio/cio.c
* S/390 common I/O routines -- low level i/o calls
* $Revision: 1.128 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cohuck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#include <linux/module.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <asm/cio.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include "airq.h"
#include "cio.h"
#include "css.h"
#include "chsc.h"
#include "ioasm.h"
#include "blacklist.h"
#include "cio_debug.h"
debug_info_t *cio_debug_msg_id;
debug_info_t *cio_debug_trace_id;
debug_info_t *cio_debug_crw_id;
int cio_show_msg;
static int __init
cio_setup (char *parm)
{
if (!strcmp (parm, "yes"))
cio_show_msg = 1;
else if (!strcmp (parm, "no"))
cio_show_msg = 0;
else
printk (KERN_ERR "cio_setup : invalid cio_msg parameter '%s'",
parm);
return 1;
}
__setup ("cio_msg=", cio_setup);
/*
* Function: cio_debug_init
* Initializes three debug logs (under /proc/s390dbf) for common I/O:
* - cio_msg logs the messages which are printk'ed when CONFIG_DEBUG_IO is on
* - cio_trace logs the calling of different functions
* - cio_crw logs the messages which are printk'ed when CONFIG_DEBUG_CRW is on
 * The debug levels depend on CONFIG_DEBUG_IO and CONFIG_DEBUG_CRW, respectively.
*/
static int __init
cio_debug_init (void)
{
cio_debug_msg_id = debug_register ("cio_msg", 4, 4, 16*sizeof (long));
if (!cio_debug_msg_id)
goto out_unregister;
debug_register_view (cio_debug_msg_id, &debug_sprintf_view);
debug_set_level (cio_debug_msg_id, 2);
cio_debug_trace_id = debug_register ("cio_trace", 4, 4, 8);
if (!cio_debug_trace_id)
goto out_unregister;
debug_register_view (cio_debug_trace_id, &debug_hex_ascii_view);
debug_set_level (cio_debug_trace_id, 2);
cio_debug_crw_id = debug_register ("cio_crw", 2, 4, 16*sizeof (long));
if (!cio_debug_crw_id)
goto out_unregister;
debug_register_view (cio_debug_crw_id, &debug_sprintf_view);
debug_set_level (cio_debug_crw_id, 2);
pr_debug("debugging initialized\n");
return 0;
out_unregister:
if (cio_debug_msg_id)
debug_unregister (cio_debug_msg_id);
if (cio_debug_trace_id)
debug_unregister (cio_debug_trace_id);
if (cio_debug_crw_id)
debug_unregister (cio_debug_crw_id);
pr_debug("could not initialize debugging\n");
return -1;
}
arch_initcall (cio_debug_init);
int
cio_set_options (struct subchannel *sch, int flags)
{
sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
return 0;
}
/* FIXME: who wants to use this? */
int
cio_get_options (struct subchannel *sch)
{
int flags;
flags = 0;
if (sch->options.suspend)
flags |= DOIO_ALLOW_SUSPEND;
if (sch->options.prefetch)
flags |= DOIO_DENY_PREFETCH;
if (sch->options.inter)
flags |= DOIO_SUPPRESS_INTER;
return flags;
}
/*
* Use tpi to get a pending interrupt, call the interrupt handler and
* return a pointer to the subchannel structure.
*/
static inline int
cio_tpi(void)
{
struct tpi_info *tpi_info;
struct subchannel *sch;
struct irb *irb;
tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
if (tpi (NULL) != 1)
return 0;
irb = (struct irb *) __LC_IRB;
/* Store interrupt response block to lowcore. */
if (tsch (tpi_info->irq, irb) != 0)
/* Not status pending or not operational. */
return 1;
sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
if (!sch)
return 1;
local_bh_disable();
irq_enter ();
spin_lock(&sch->lock);
memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
if (sch->driver && sch->driver->irq)
sch->driver->irq(&sch->dev);
spin_unlock(&sch->lock);
irq_exit ();
__local_bh_enable();
return 1;
}
static inline int
cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
{
char dbf_text[15];
if (lpm != 0)
sch->lpm &= ~lpm;
else
sch->lpm = 0;
stsch (sch->irq, &sch->schib);
CIO_MSG_EVENT(0, "cio_start: 'not oper' status for "
"subchannel %04x!\n", sch->irq);
sprintf(dbf_text, "no%s", sch->dev.bus_id);
CIO_TRACE_EVENT(0, dbf_text);
CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
return (sch->lpm ? -EACCES : -ENODEV);
}
int
cio_start (struct subchannel *sch, /* subchannel structure */
struct ccw1 * cpa, /* logical channel prog addr */
__u8 lpm) /* logical path mask */
{
char dbf_txt[15];
int ccode;
CIO_TRACE_EVENT (4, "stIO");
CIO_TRACE_EVENT (4, sch->dev.bus_id);
/* sch is always under 2G. */
sch->orb.intparm = (__u32)(unsigned long)sch;
sch->orb.fmt = 1;
sch->orb.pfch = sch->options.prefetch == 0;
sch->orb.spnd = sch->options.suspend;
sch->orb.ssic = sch->options.suspend && sch->options.inter;
sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm;
#ifdef CONFIG_ARCH_S390X
/*
* for 64 bit we always support 64 bit IDAWs with 4k page size only
*/
sch->orb.c64 = 1;
sch->orb.i2k = 0;
#endif
sch->orb.cpa = (__u32) __pa (cpa);
/*
* Issue "Start subchannel" and process condition code
*/
ccode = ssch (sch->irq, &sch->orb);
sprintf (dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT (4, dbf_txt);
switch (ccode) {
case 0:
/*
* initialize device status information
*/
sch->schib.scsw.actl |= SCSW_ACTL_START_PEND;
return 0;
case 1: /* status pending */
case 2: /* busy */
return -EBUSY;
default: /* device/path not operational */
return cio_start_handle_notoper(sch, lpm);
}
}
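/*
 * Caller sketch (illustrative; opcode 0xe4 is Sense ID, buffer and
 * error handling are hypothetical). cio_start() only issues the
 * request; completion is reported asynchronously through the
 * interrupt handler in sch->driver->irq:
 *
 *	struct ccw1 sense_ccw;
 *
 *	sense_ccw.cmd_code = 0xe4;		// Sense ID
 *	sense_ccw.flags = CCW_FLAG_SLI;		// tolerate short reads
 *	sense_ccw.count = sizeof (buf);
 *	sense_ccw.cda = (__u32) __pa (&buf);
 *	ret = cio_start (sch, &sense_ccw, 0);	// lpm 0: use sch->lpm
 */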
/*
* resume suspended I/O operation
*/
int
cio_resume (struct subchannel *sch)
{
char dbf_txt[15];
int ccode;
CIO_TRACE_EVENT (4, "resIO");
CIO_TRACE_EVENT (4, sch->dev.bus_id);
ccode = rsch (sch->irq);
sprintf (dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT (4, dbf_txt);
switch (ccode) {
case 0:
sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND;
return 0;
case 1:
return -EBUSY;
case 2:
return -EINVAL;
default:
/*
* useless to wait for request completion
* as device is no longer operational !
*/
return -ENODEV;
}
}
/*
* halt I/O operation
*/
int
cio_halt(struct subchannel *sch)
{
char dbf_txt[15];
int ccode;
if (!sch)
return -ENODEV;
CIO_TRACE_EVENT (2, "haltIO");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
/*
* Issue "Halt subchannel" and process condition code
*/
ccode = hsch (sch->irq);
sprintf (dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT (2, dbf_txt);
switch (ccode) {
case 0:
sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND;
return 0;
case 1: /* status pending */
case 2: /* busy */
return -EBUSY;
default: /* device not operational */
return -ENODEV;
}
}
/*
* Clear I/O operation
*/
int
cio_clear(struct subchannel *sch)
{
char dbf_txt[15];
int ccode;
if (!sch)
return -ENODEV;
CIO_TRACE_EVENT (2, "clearIO");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
/*
* Issue "Clear subchannel" and process condition code
*/
ccode = csch (sch->irq);
sprintf (dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT (2, dbf_txt);
switch (ccode) {
case 0:
sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND;
return 0;
default: /* device not operational */
return -ENODEV;
}
}
/*
* Function: cio_cancel
* Issues a "Cancel Subchannel" on the specified subchannel
* Note: We don't need any fancy intparms and flags here
* since xsch is executed synchronously.
 * Only for common I/O internal use for now.
*/
int
cio_cancel (struct subchannel *sch)
{
char dbf_txt[15];
int ccode;
if (!sch)
return -ENODEV;
CIO_TRACE_EVENT (2, "cancelIO");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
ccode = xsch (sch->irq);
sprintf (dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT (2, dbf_txt);
switch (ccode) {
case 0: /* success */
/* Update information in scsw. */
stsch (sch->irq, &sch->schib);
return 0;
case 1: /* status pending */
return -EBUSY;
case 2: /* not applicable */
return -EINVAL;
default: /* not oper */
return -ENODEV;
}
}
/*
* Function: cio_modify
* Issues a "Modify Subchannel" on the specified subchannel
*/
int
cio_modify (struct subchannel *sch)
{
int ccode, retry, ret;
ret = 0;
for (retry = 0; retry < 5; retry++) {
ccode = msch_err (sch->irq, &sch->schib);
if (ccode < 0) /* -EIO if msch gets a program check. */
return ccode;
switch (ccode) {
case 0: /* successful */
return 0;
case 1: /* status pending */
return -EBUSY;
case 2: /* busy */
udelay (100); /* allow for recovery */
ret = -EBUSY;
break;
case 3: /* not operational */
return -ENODEV;
}
}
return ret;
}
/*
* Enable subchannel.
*/
int
cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
{
char dbf_txt[15];
int ccode;
int retry;
int ret;
CIO_TRACE_EVENT (2, "ensch");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
ccode = stsch (sch->irq, &sch->schib);
if (ccode)
return -ENODEV;
for (retry = 5, ret = 0; retry > 0; retry--) {
sch->schib.pmcw.ena = 1;
sch->schib.pmcw.isc = isc;
sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
ret = cio_modify(sch);
if (ret == -ENODEV)
break;
if (ret == -EIO)
/*
* Got a program check in cio_modify. Try without
* the concurrent sense bit the next time.
*/
sch->schib.pmcw.csense = 0;
if (ret == 0) {
stsch (sch->irq, &sch->schib);
if (sch->schib.pmcw.ena)
break;
}
if (ret == -EBUSY) {
struct irb irb;
if (tsch(sch->irq, &irb) != 0)
break;
}
}
sprintf (dbf_txt, "ret:%d", ret);
CIO_TRACE_EVENT (2, dbf_txt);
return ret;
}
/*
* Disable subchannel.
*/
int
cio_disable_subchannel (struct subchannel *sch)
{
char dbf_txt[15];
int ccode;
int retry;
int ret;
CIO_TRACE_EVENT (2, "dissch");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
ccode = stsch (sch->irq, &sch->schib);
if (ccode == 3) /* Not operational. */
return -ENODEV;
if (sch->schib.scsw.actl != 0)
/*
* the disable function must not be called while there are
* requests pending for completion !
*/
return -EBUSY;
for (retry = 5, ret = 0; retry > 0; retry--) {
sch->schib.pmcw.ena = 0;
ret = cio_modify(sch);
if (ret == -ENODEV)
break;
if (ret == -EBUSY)
/*
* The subchannel is busy or status pending.
* We'll disable when the next interrupt was delivered
* via the state machine.
*/
break;
if (ret == 0) {
stsch (sch->irq, &sch->schib);
if (!sch->schib.pmcw.ena)
break;
}
}
sprintf (dbf_txt, "ret:%d", ret);
CIO_TRACE_EVENT (2, dbf_txt);
return ret;
}
/*
* cio_validate_subchannel()
*
* Find out subchannel type and initialize struct subchannel.
* Return codes:
* SUBCHANNEL_TYPE_IO for a normal io subchannel
* SUBCHANNEL_TYPE_CHSC for a chsc subchannel
* SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel
 * SUBCHANNEL_TYPE_ADM for an adm(?) subchannel
* -ENXIO for non-defined subchannels
* -ENODEV for subchannels with invalid device number or blacklisted devices
*/
int
cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
{
char dbf_txt[15];
int ccode;
sprintf (dbf_txt, "valsch%x", irq);
CIO_TRACE_EVENT (4, dbf_txt);
/* Nuke all fields. */
memset(sch, 0, sizeof(struct subchannel));
spin_lock_init(&sch->lock);
/* Set a name for the subchannel */
snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", irq);
/*
* The first subchannel that is not-operational (ccode==3)
* indicates that there aren't any more devices available.
*/
sch->irq = irq;
ccode = stsch (irq, &sch->schib);
if (ccode)
return -ENXIO;
/* Copy subchannel type from path management control word. */
sch->st = sch->schib.pmcw.st;
/*
* ... just being curious we check for non I/O subchannels
*/
if (sch->st != 0) {
CIO_DEBUG(KERN_INFO, 0,
"Subchannel %04X reports "
"non-I/O subchannel type %04X\n",
sch->irq, sch->st);
/* We stop here for non-io subchannels. */
return sch->st;
}
/* Initialization for io subchannels. */
if (!sch->schib.pmcw.dnv)
/* io subchannel but device number is invalid. */
return -ENODEV;
/* Devno is valid. */
if (is_blacklisted (sch->schib.pmcw.dev)) {
/*
* This device must not be known to Linux. So we simply
* say that there is no device and return ENODEV.
*/
CIO_MSG_EVENT(0, "Blacklisted device detected "
"at devno %04X\n", sch->schib.pmcw.dev);
return -ENODEV;
}
sch->opm = 0xff;
chsc_validate_chpids(sch);
sch->lpm = sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
sch->schib.pmcw.pom &
sch->opm;
CIO_DEBUG(KERN_INFO, 0,
"Detected device %04X on subchannel %04X"
" - PIM = %02X, PAM = %02X, POM = %02X\n",
sch->schib.pmcw.dev, sch->irq, sch->schib.pmcw.pim,
sch->schib.pmcw.pam, sch->schib.pmcw.pom);
/*
* We now have to initially ...
* ... set "interruption subclass"
* ... enable "concurrent sense"
* ... enable "multipath mode" if more than one
* CHPID is available. This is done regardless
* whether multiple paths are available for us.
*/
sch->schib.pmcw.isc = 3; /* could be smth. else */
sch->schib.pmcw.csense = 1; /* concurrent sense */
sch->schib.pmcw.ena = 0;
if ((sch->lpm & (sch->lpm - 1)) != 0)
sch->schib.pmcw.mp = 1; /* multipath mode */
return 0;
}
/*
* do_IRQ() handles all normal I/O device IRQ's (the special
* SMP cross-CPU interrupts have their own specific
* handlers).
*
*/
void
do_IRQ (struct pt_regs *regs)
{
struct tpi_info *tpi_info;
struct subchannel *sch;
struct irb *irb;
irq_enter ();
asm volatile ("mc 0,0");
if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
account_ticks(regs);
/*
* Get interrupt information from lowcore
*/
tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
irb = (struct irb *) __LC_IRB;
do {
kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
/*
* Non I/O-subchannel thin interrupts are processed differently
*/
if (tpi_info->adapter_IO == 1 &&
tpi_info->int_type == IO_INTERRUPT_TYPE) {
do_adapter_IO();
continue;
}
sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
if (sch)
spin_lock(&sch->lock);
/* Store interrupt response block to lowcore. */
if (tsch (tpi_info->irq, irb) == 0 && sch) {
/* Keep subchannel information word up to date. */
memcpy (&sch->schib.scsw, &irb->scsw,
sizeof (irb->scsw));
/* Call interrupt handler if there is one. */
if (sch->driver && sch->driver->irq)
sch->driver->irq(&sch->dev);
}
if (sch)
spin_unlock(&sch->lock);
/*
* Are more interrupts pending?
* If so, the tpi instruction will update the lowcore
* to hold the info for the next interrupt.
* We don't do this for VM because a tpi drops the cpu
* out of the sie which costs more cycles than it saves.
*/
} while (!MACHINE_IS_VM && tpi (NULL) != 0);
irq_exit ();
}
#ifdef CONFIG_CCW_CONSOLE
static struct subchannel console_subchannel;
static int console_subchannel_in_use;
/*
* busy wait for the next interrupt on the console
*/
void
wait_cons_dev (void)
{
unsigned long cr6 __attribute__ ((aligned (8)));
unsigned long save_cr6 __attribute__ ((aligned (8)));
/*
* before entering the spinlock we may already have
* processed the interrupt on a different CPU...
*/
if (!console_subchannel_in_use)
return;
/* disable all but isc 7 (console device) */
__ctl_store (save_cr6, 6, 6);
cr6 = 0x01000000;
__ctl_load (cr6, 6, 6);
do {
spin_unlock(&console_subchannel.lock);
if (!cio_tpi())
cpu_relax();
spin_lock(&console_subchannel.lock);
} while (console_subchannel.schib.scsw.actl != 0);
/*
* restore previous isc value
*/
__ctl_load (save_cr6, 6, 6);
}
static int
cio_console_irq(void)
{
int irq;
if (console_irq != -1) {
/* VM provided us with the irq number of the console. */
if (stsch(console_irq, &console_subchannel.schib) != 0 ||
!console_subchannel.schib.pmcw.dnv)
return -1;
console_devno = console_subchannel.schib.pmcw.dev;
} else if (console_devno != -1) {
/* At least the console device number is known. */
for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
if (stsch(irq, &console_subchannel.schib) != 0)
break;
if (console_subchannel.schib.pmcw.dnv &&
console_subchannel.schib.pmcw.dev ==
console_devno) {
console_irq = irq;
break;
}
}
if (console_irq == -1)
return -1;
} else {
/* unlike in 2.4, we cannot autoprobe here, since
* the channel subsystem is not fully initialized.
* With some luck, the HWC console can take over */
printk(KERN_WARNING "No ccw console found!\n");
return -1;
}
return console_irq;
}
struct subchannel *
cio_probe_console(void)
{
int irq, ret;
if (xchg(&console_subchannel_in_use, 1) != 0)
return ERR_PTR(-EBUSY);
irq = cio_console_irq();
if (irq == -1) {
console_subchannel_in_use = 0;
return ERR_PTR(-ENODEV);
}
memset(&console_subchannel, 0, sizeof(struct subchannel));
ret = cio_validate_subchannel(&console_subchannel, irq);
if (ret) {
console_subchannel_in_use = 0;
return ERR_PTR(-ENODEV);
}
/*
* enable console I/O-interrupt subclass 7
*/
ctl_set_bit(6, 24);
console_subchannel.schib.pmcw.isc = 7;
console_subchannel.schib.pmcw.intparm =
(__u32)(unsigned long)&console_subchannel;
ret = cio_modify(&console_subchannel);
if (ret) {
console_subchannel_in_use = 0;
return ERR_PTR(ret);
}
return &console_subchannel;
}
void
cio_release_console(void)
{
console_subchannel.schib.pmcw.intparm = 0;
cio_modify(&console_subchannel);
ctl_clear_bit(6, 24);
console_subchannel_in_use = 0;
}
/* Bah... hack to catch console special sausages. */
int
cio_is_console(int irq)
{
if (!console_subchannel_in_use)
return 0;
return (irq == console_subchannel.irq);
}
struct subchannel *
cio_get_console_subchannel(void)
{
if (!console_subchannel_in_use)
return NULL;
return &console_subchannel;
}
#endif
static inline int
__disable_subchannel_easy(unsigned int schid, struct schib *schib)
{
int retry, cc;
cc = 0;
for (retry=0;retry<3;retry++) {
schib->pmcw.ena = 0;
cc = msch(schid, schib);
if (cc)
return (cc==3?-ENODEV:-EBUSY);
stsch(schid, schib);
if (!schib->pmcw.ena)
return 0;
}
return -EBUSY; /* uhm... */
}
static inline int
__clear_subchannel_easy(unsigned int schid)
{
int retry;
if (csch(schid))
return -ENODEV;
for (retry=0;retry<20;retry++) {
struct tpi_info ti;
if (tpi(&ti)) {
tsch(schid, (struct irb *)__LC_IRB);
return 0;
}
udelay(100);
}
return -EBUSY;
}
extern void do_reipl(unsigned long devno);
/* Make sure all subchannels are quiet before we re-ipl an lpar. */
void
reipl(unsigned long devno)
{
unsigned int schid;
local_irq_disable();
for (schid=0;schid<=highest_subchannel;schid++) {
struct schib schib;
if (stsch(schid, &schib))
goto out;
if (!schib.pmcw.ena)
continue;
switch(__disable_subchannel_easy(schid, &schib)) {
case 0:
case -ENODEV:
break;
default: /* -EBUSY */
if (__clear_subchannel_easy(schid))
break; /* give up... */
stsch(schid, &schib);
__disable_subchannel_easy(schid, &schib);
}
}
out:
do_reipl(devno);
}


@@ -0,0 +1,142 @@
#ifndef S390_CIO_H
#define S390_CIO_H
/*
* where we put the ssd info
*/
struct ssd_info {
__u8 valid:1;
__u8 type:7; /* subchannel type */
__u8 chpid[8]; /* chpids */
__u16 fla[8]; /* full link addresses */
} __attribute__ ((packed));
/*
* path management control word
*/
struct pmcw {
__u32 intparm; /* interruption parameter */
__u32 qf : 1; /* qdio facility */
__u32 res0 : 1; /* reserved zeros */
__u32 isc : 3; /* interruption subclass */
__u32 res5 : 3; /* reserved zeros */
__u32 ena : 1; /* enabled */
__u32 lm : 2; /* limit mode */
__u32 mme : 2; /* measurement-mode enable */
__u32 mp : 1; /* multipath mode */
__u32 tf : 1; /* timing facility */
__u32 dnv : 1; /* device number valid */
__u32 dev : 16; /* device number */
__u8 lpm; /* logical path mask */
__u8 pnom; /* path not operational mask */
__u8 lpum; /* last path used mask */
__u8 pim; /* path installed mask */
__u16 mbi; /* measurement-block index */
__u8 pom; /* path operational mask */
__u8 pam; /* path available mask */
__u8 chpid[8]; /* CHPID 0-7 (if available) */
__u32 unused1 : 8; /* reserved zeros */
__u32 st : 3; /* subchannel type */
__u32 unused2 : 18; /* reserved zeros */
__u32 mbfc : 1; /* measurement block format control */
__u32 xmwme : 1; /* extended measurement word mode enable */
__u32 csense : 1; /* concurrent sense; can be enabled ...*/
/* ... per MSCH, however, if facility */
/* ... is not installed, this results */
/* ... in an operand exception. */
} __attribute__ ((packed));
/*
* subchannel information block
*/
struct schib {
struct pmcw pmcw; /* path management control word */
struct scsw scsw; /* subchannel status word */
__u64 mba; /* measurement block address */
__u8 mda[4]; /* model dependent area */
} __attribute__ ((packed,aligned(4)));
/*
* operation request block
*/
struct orb {
__u32 intparm; /* interruption parameter */
__u32 key : 4; /* flags, like key, suspend control, etc. */
__u32 spnd : 1; /* suspend control */
__u32 res1 : 1; /* reserved */
__u32 mod : 1; /* modification control */
__u32 sync : 1; /* synchronize control */
__u32 fmt : 1; /* format control */
__u32 pfch : 1; /* prefetch control */
__u32 isic : 1; /* initial-status-interruption control */
__u32 alcc : 1; /* address-limit-checking control */
__u32 ssic : 1; /* suppress-suspended-interr. control */
__u32 res2 : 1; /* reserved */
__u32 c64 : 1; /* IDAW/QDIO 64 bit control */
__u32 i2k : 1; /* IDAW 2/4kB block size control */
__u32 lpm : 8; /* logical path mask */
__u32 ils : 1; /* incorrect length */
__u32 zero : 6; /* reserved zeros */
__u32 orbx : 1; /* ORB extension control */
__u32 cpa; /* channel program address */
} __attribute__ ((packed,aligned(4)));
/* subchannel data structure used by I/O subroutines */
struct subchannel {
unsigned int irq; /* aka. subchannel number */
spinlock_t lock; /* subchannel lock */
enum {
SUBCHANNEL_TYPE_IO = 0,
SUBCHANNEL_TYPE_CHSC = 1,
SUBCHANNEL_TYPE_MESSAGE = 2,
SUBCHANNEL_TYPE_ADM = 3,
} st; /* subchannel type */
struct {
unsigned int suspend:1; /* allow suspend */
unsigned int prefetch:1;/* deny prefetch */
unsigned int inter:1; /* suppress intermediate interrupts */
} __attribute__ ((packed)) options;
__u8 vpm; /* verified path mask */
__u8 lpm; /* logical path mask */
__u8 opm; /* operational path mask */
struct schib schib; /* subchannel information block */
struct orb orb; /* operation request block */
struct ccw1 sense_ccw; /* static ccw for sense command */
struct ssd_info ssd_info; /* subchannel description */
struct device dev; /* entry in device tree */
struct css_driver *driver;
} __attribute__ ((aligned(8)));
#define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */
#define to_subchannel(n) container_of(n, struct subchannel, dev)
extern int cio_validate_subchannel (struct subchannel *, unsigned int);
extern int cio_enable_subchannel (struct subchannel *, unsigned int);
extern int cio_disable_subchannel (struct subchannel *);
extern int cio_cancel (struct subchannel *);
extern int cio_clear (struct subchannel *);
extern int cio_resume (struct subchannel *);
extern int cio_halt (struct subchannel *);
extern int cio_start (struct subchannel *, struct ccw1 *, __u8);
extern int cio_cancel (struct subchannel *);
extern int cio_set_options (struct subchannel *, int);
extern int cio_get_options (struct subchannel *);
extern int cio_modify (struct subchannel *);
/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
extern void cio_release_console(void);
extern int cio_is_console(int irq);
extern struct subchannel *cio_get_console_subchannel(void);
#else
#define cio_is_console(irq) 0
#define cio_get_console_subchannel() NULL
#endif
extern int cio_show_msg;
#endif


@@ -0,0 +1,32 @@
#ifndef CIO_DEBUG_H
#define CIO_DEBUG_H
#include <asm/debug.h>
#define CIO_TRACE_EVENT(imp, txt) do { \
debug_text_event(cio_debug_trace_id, imp, txt); \
} while (0)
#define CIO_MSG_EVENT(imp, args...) do { \
debug_sprintf_event(cio_debug_msg_id, imp , ##args); \
} while (0)
#define CIO_CRW_EVENT(imp, args...) do { \
debug_sprintf_event(cio_debug_crw_id, imp , ##args); \
} while (0)
#define CIO_HEX_EVENT(imp, args...) do { \
debug_event(cio_debug_trace_id, imp, ##args); \
} while (0)
#define CIO_DEBUG(printk_level,event_level,msg...) ({ \
if (cio_show_msg) printk(printk_level msg); \
CIO_MSG_EVENT (event_level, msg); \
})
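/*
 * Typical calls (illustrative, mirroring the users in cio.c):
 *	CIO_TRACE_EVENT (4, "stIO");
 *	CIO_MSG_EVENT (2, "no device at %04x\n", devno);
 *	CIO_HEX_EVENT (0, &sch->schib, sizeof (struct schib));
 */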
/* for use of debug feature */
extern debug_info_t *cio_debug_msg_id;
extern debug_info_t *cio_debug_trace_id;
extern debug_info_t *cio_debug_crw_id;
#endif

File diff suppressed because it is too large


@@ -0,0 +1,567 @@
/*
* drivers/s390/cio/css.c
* driver for channel subsystem
* $Revision: 1.84 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cohuck@de.ibm.com)
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
unsigned int highest_subchannel;
int need_rescan = 0;
int css_init_done = 0;
struct pgid global_pgid;
int css_characteristics_avail = 0;
struct device css_bus_device = {
.bus_id = "css0",
};
static struct subchannel *
css_alloc_subchannel(int irq)
{
struct subchannel *sch;
int ret;
sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
if (sch == NULL)
return ERR_PTR(-ENOMEM);
ret = cio_validate_subchannel (sch, irq);
if (ret < 0) {
kfree(sch);
return ERR_PTR(ret);
}
if (irq > highest_subchannel)
highest_subchannel = irq;
if (sch->st != SUBCHANNEL_TYPE_IO) {
/* For now we ignore all non-io subchannels. */
kfree(sch);
return ERR_PTR(-EINVAL);
}
/*
* Set intparm to subchannel address.
* This is fine even on 64bit since the subchannel is always located
* under 2G.
*/
sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
ret = cio_modify(sch);
if (ret) {
kfree(sch);
return ERR_PTR(ret);
}
return sch;
}
static void
css_free_subchannel(struct subchannel *sch)
{
if (sch) {
/* Reset intparm to zeroes. */
sch->schib.pmcw.intparm = 0;
cio_modify(sch);
kfree(sch);
}
}
static void
css_subchannel_release(struct device *dev)
{
struct subchannel *sch;
sch = to_subchannel(dev);
if (!cio_is_console(sch->irq))
kfree(sch);
}
extern int css_get_ssd_info(struct subchannel *sch);
static int
css_register_subchannel(struct subchannel *sch)
{
int ret;
/* Initialize the subchannel structure */
sch->dev.parent = &css_bus_device;
sch->dev.bus = &css_bus_type;
sch->dev.release = &css_subchannel_release;
/* make it known to the system */
ret = device_register(&sch->dev);
if (ret)
printk (KERN_WARNING "%s: could not register %s\n",
__func__, sch->dev.bus_id);
else
css_get_ssd_info(sch);
return ret;
}
int
css_probe_device(int irq)
{
int ret;
struct subchannel *sch;
sch = css_alloc_subchannel(irq);
if (IS_ERR(sch))
return PTR_ERR(sch);
ret = css_register_subchannel(sch);
if (ret)
css_free_subchannel(sch);
return ret;
}
struct subchannel *
get_subchannel_by_schid(int irq)
{
struct subchannel *sch;
struct list_head *entry;
struct device *dev;
if (!get_bus(&css_bus_type))
return NULL;
down_read(&css_bus_type.subsys.rwsem);
sch = NULL;
list_for_each(entry, &css_bus_type.devices.list) {
dev = get_device(container_of(entry,
struct device, bus_list));
if (!dev)
continue;
sch = to_subchannel(dev);
if (sch->irq == irq)
break;
put_device(dev);
sch = NULL;
}
up_read(&css_bus_type.subsys.rwsem);
put_bus(&css_bus_type);
return sch;
}
static inline int
css_get_subchannel_status(struct subchannel *sch, int schid)
{
struct schib schib;
int cc;
cc = stsch(schid, &schib);
if (cc)
return CIO_GONE;
if (!schib.pmcw.dnv)
return CIO_GONE;
if (sch && sch->schib.pmcw.dnv &&
(schib.pmcw.dev != sch->schib.pmcw.dev))
return CIO_REVALIDATE;
if (sch && !sch->lpm)
return CIO_NO_PATH;
return CIO_OPER;
}
static int
css_evaluate_subchannel(int irq, int slow)
{
int event, ret, disc;
struct subchannel *sch;
sch = get_subchannel_by_schid(irq);
disc = sch ? device_is_disconnected(sch) : 0;
if (disc && slow) {
if (sch)
put_device(&sch->dev);
return 0; /* Already processed. */
}
/*
* We've got a machine check, so running I/O won't get an interrupt.
* Kill any pending timers.
*/
if (sch)
device_kill_pending_timer(sch);
if (!disc && !slow) {
if (sch)
put_device(&sch->dev);
return -EAGAIN; /* Will be done on the slow path. */
}
event = css_get_subchannel_status(sch, irq);
CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n",
irq, event, sch?(disc?"disconnected":"normal"):"unknown",
slow?"slow":"fast");
switch (event) {
case CIO_NO_PATH:
case CIO_GONE:
if (!sch) {
/* Never used this subchannel. Ignore. */
ret = 0;
break;
}
if (disc && (event == CIO_NO_PATH)) {
/*
* Uargh, hack again. Because we don't get a machine
* check on configure on, our path bookkeeping can
* be out of date here (it's fine while we only do
* logical varying or get chsc machine checks). We
* need to force reprobing or we might miss devices
* coming operational again. It won't do harm in real
* no path situations.
*/
device_trigger_reprobe(sch);
ret = 0;
break;
}
if (sch->driver && sch->driver->notify &&
sch->driver->notify(&sch->dev, event)) {
cio_disable_subchannel(sch);
device_set_disconnected(sch);
ret = 0;
break;
}
/*
* Unregister subchannel.
* The device will be killed automatically.
*/
cio_disable_subchannel(sch);
device_unregister(&sch->dev);
/* Reset intparm to zeroes. */
sch->schib.pmcw.intparm = 0;
cio_modify(sch);
put_device(&sch->dev);
ret = 0;
break;
case CIO_REVALIDATE:
/*
* Revalidation machine check. Sick.
* We don't notify the driver since we have to throw the device
* away in any case.
*/
if (!disc) {
device_unregister(&sch->dev);
/* Reset intparm to zeroes. */
sch->schib.pmcw.intparm = 0;
cio_modify(sch);
put_device(&sch->dev);
ret = css_probe_device(irq);
} else {
/*
* We can't immediately deregister the disconnected
* device since it might block.
*/
device_trigger_reprobe(sch);
ret = 0;
}
break;
case CIO_OPER:
if (disc)
/* Get device operational again. */
device_trigger_reprobe(sch);
ret = sch ? 0 : css_probe_device(irq);
break;
default:
BUG();
ret = 0;
}
return ret;
}
static void
css_rescan_devices(void)
{
int irq, ret;
for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
ret = css_evaluate_subchannel(irq, 1);
/* No more memory. It doesn't make sense to continue. No
* panic because this can happen in midflight and just
* because we can't use a new device is no reason to crash
* the system. */
if (ret == -ENOMEM)
break;
/* -ENXIO indicates that there are no more subchannels. */
if (ret == -ENXIO)
break;
}
}
struct slow_subchannel {
struct list_head slow_list;
unsigned long schid;
};
static LIST_HEAD(slow_subchannels_head);
static spinlock_t slow_subchannel_lock = SPIN_LOCK_UNLOCKED;
static void
css_trigger_slow_path(void)
{
CIO_TRACE_EVENT(4, "slowpath");
if (need_rescan) {
need_rescan = 0;
css_rescan_devices();
return;
}
spin_lock_irq(&slow_subchannel_lock);
while (!list_empty(&slow_subchannels_head)) {
struct slow_subchannel *slow_sch =
list_entry(slow_subchannels_head.next,
struct slow_subchannel, slow_list);
list_del_init(slow_subchannels_head.next);
spin_unlock_irq(&slow_subchannel_lock);
css_evaluate_subchannel(slow_sch->schid, 1);
spin_lock_irq(&slow_subchannel_lock);
kfree(slow_sch);
}
spin_unlock_irq(&slow_subchannel_lock);
}
typedef void (*workfunc)(void *);
DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL);
struct workqueue_struct *slow_path_wq;
/*
* Rescan for new devices. FIXME: This is slow.
* This function is called when we have lost CRWs due to overflows and we have
* to do subchannel housekeeping.
*/
void
css_reiterate_subchannels(void)
{
css_clear_subchannel_slow_list();
need_rescan = 1;
}
/*
* Called from the machine check handler for subchannel report words.
*/
int
css_process_crw(int irq)
{
int ret;
CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq);
if (need_rescan)
/* We need to iterate all subchannels anyway. */
return -EAGAIN;
/*
* Since we are always presented with IPI in the CRW, we have to
* use stsch() to find out if the subchannel in question has come
* or gone.
*/
ret = css_evaluate_subchannel(irq, 0);
if (ret == -EAGAIN) {
if (css_enqueue_subchannel_slow(irq)) {
css_clear_subchannel_slow_list();
need_rescan = 1;
}
}
return ret;
}
static void __init
css_generate_pgid(void)
{
/* Let's build our path group ID here. */
if (css_characteristics_avail && css_general_characteristics.mcss)
global_pgid.cpu_addr = 0x8000;
else {
#ifdef CONFIG_SMP
global_pgid.cpu_addr = hard_smp_processor_id();
#else
global_pgid.cpu_addr = 0;
#endif
}
global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
global_pgid.tod_high = (__u32) (get_clock() >> 32);
}
/*
* Now that the driver core is running, we can setup our channel subsystem.
* The struct subchannel's are created during probing (except for the
* static console subchannel).
*/
static int __init
init_channel_subsystem (void)
{
int ret, irq;
if (chsc_determine_css_characteristics() == 0)
css_characteristics_avail = 1;
css_generate_pgid();
if ((ret = bus_register(&css_bus_type)))
goto out;
if ((ret = device_register (&css_bus_device)))
goto out_bus;
css_init_done = 1;
ctl_set_bit(6, 28);
for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
struct subchannel *sch;
if (cio_is_console(irq))
sch = cio_get_console_subchannel();
else {
sch = css_alloc_subchannel(irq);
if (IS_ERR(sch))
ret = PTR_ERR(sch);
else
ret = 0;
if (ret == -ENOMEM)
panic("Out of memory in "
"init_channel_subsystem\n");
/* -ENXIO: no more subchannels. */
if (ret == -ENXIO)
break;
if (ret)
continue;
}
/*
* We register ALL valid subchannels in ioinfo, even those
* that have been present before init_channel_subsystem.
* These subchannels can't have been registered yet (kmalloc
* not working) so we do it now. This is true e.g. for the
* console subchannel.
*/
css_register_subchannel(sch);
}
return 0;
out_bus:
bus_unregister(&css_bus_type);
out:
return ret;
}
/*
 * Find a driver for a subchannel. Drivers are matched by subchannel
 * type, with the exception that the console subchannel driver has its
 * own subchannel type although the device is an I/O subchannel.
*/
static int
css_bus_match (struct device *dev, struct device_driver *drv)
{
struct subchannel *sch = container_of (dev, struct subchannel, dev);
struct css_driver *driver = container_of (drv, struct css_driver, drv);
if (sch->st == driver->subchannel_type)
return 1;
return 0;
}
struct bus_type css_bus_type = {
.name = "css",
.match = &css_bus_match,
};
subsys_initcall(init_channel_subsystem);
/*
* Register root devices for some drivers. The release function must not be
* in the device drivers, so we do it here.
*/
static void
s390_root_dev_release(struct device *dev)
{
kfree(dev);
}
struct device *
s390_root_dev_register(const char *name)
{
struct device *dev;
int ret;
if (!strlen(name))
return ERR_PTR(-EINVAL);
dev = kmalloc(sizeof(struct device), GFP_KERNEL);
if (!dev)
return ERR_PTR(-ENOMEM);
memset(dev, 0, sizeof(struct device));
strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE));
dev->release = s390_root_dev_release;
ret = device_register(dev);
if (ret) {
kfree(dev);
return ERR_PTR(ret);
}
return dev;
}
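/*
 * Illustrative use ("mydrv" is a hypothetical name):
 *
 *	struct device *root = s390_root_dev_register("mydrv");
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *	... use root, e.g. as parent for ccwgroup_create() ...
 *	s390_root_dev_unregister(root);
 */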
void
s390_root_dev_unregister(struct device *dev)
{
if (dev)
device_unregister(dev);
}
int
css_enqueue_subchannel_slow(unsigned long schid)
{
struct slow_subchannel *new_slow_sch;
unsigned long flags;
new_slow_sch = kmalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
if (!new_slow_sch)
return -ENOMEM;
memset(new_slow_sch, 0, sizeof(struct slow_subchannel));
new_slow_sch->schid = schid;
spin_lock_irqsave(&slow_subchannel_lock, flags);
list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
return 0;
}
void
css_clear_subchannel_slow_list(void)
{
unsigned long flags;
spin_lock_irqsave(&slow_subchannel_lock, flags);
while (!list_empty(&slow_subchannels_head)) {
struct slow_subchannel *slow_sch =
list_entry(slow_subchannels_head.next,
struct slow_subchannel, slow_list);
list_del_init(slow_subchannels_head.next);
kfree(slow_sch);
}
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
int
css_slow_subchannels_exist(void)
{
return (!list_empty(&slow_subchannels_head));
}
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);
EXPORT_SYMBOL(s390_root_dev_register);
EXPORT_SYMBOL(s390_root_dev_unregister);
EXPORT_SYMBOL_GPL(css_characteristics_avail);

View File

@@ -0,0 +1,153 @@
#ifndef _CSS_H
#define _CSS_H
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <asm/cio.h>
/*
* path grouping stuff
*/
#define SPID_FUNC_SINGLE_PATH 0x00
#define SPID_FUNC_MULTI_PATH 0x80
#define SPID_FUNC_ESTABLISH 0x00
#define SPID_FUNC_RESIGN 0x40
#define SPID_FUNC_DISBAND 0x20
#define SNID_STATE1_RESET 0
#define SNID_STATE1_UNGROUPED 2
#define SNID_STATE1_GROUPED 3
#define SNID_STATE2_NOT_RESVD 0
#define SNID_STATE2_RESVD_ELSE 2
#define SNID_STATE2_RESVD_SELF 3
#define SNID_STATE3_MULTI_PATH 1
#define SNID_STATE3_SINGLE_PATH 0
struct path_state {
__u8 state1 : 2; /* path state value 1 */
__u8 state2 : 2; /* path state value 2 */
__u8 state3 : 1; /* path state value 3 */
__u8 resvd : 3; /* reserved */
} __attribute__ ((packed));
struct pgid {
union {
__u8 fc; /* SPID function code */
struct path_state ps; /* SNID path state */
} inf;
__u32 cpu_addr : 16; /* CPU address */
__u32 cpu_id : 24; /* CPU identification */
__u32 cpu_model : 16; /* CPU model */
__u32 tod_high; /* high word TOD clock */
} __attribute__ ((packed));
extern struct pgid global_pgid;
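/*
 * Illustration (sketch, not part of the interface): a multi-path
 * establish request sets the function byte to
 *	SPID_FUNC_ESTABLISH | SPID_FUNC_MULTI_PATH	(== 0x80),
 * cf. __ccw_device_do_pgid() in device_pgid.c; a sensed path that is
 * grouped and reserved by this system reports
 *	ps.state1 == SNID_STATE1_GROUPED,
 *	ps.state2 == SNID_STATE2_RESVD_SELF.
 */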
#define MAX_CIWS 8
/*
* sense-id response buffer layout
*/
struct senseid {
/* common part */
__u8 reserved; /* always 0x'FF' */
__u16 cu_type; /* control unit type */
__u8 cu_model; /* control unit model */
__u16 dev_type; /* device type */
__u8 dev_model; /* device model */
__u8 unused; /* padding byte */
/* extended part */
struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */
} __attribute__ ((packed,aligned(4)));
struct ccw_device_private {
int state; /* device state */
atomic_t onoff;
unsigned long registered;
__u16 devno; /* device number */
__u16 irq; /* subchannel number */
__u8 imask; /* lpm mask for SNID/SID/SPGID */
int iretry; /* retry counter SNID/SID/SPGID */
struct {
unsigned int fast:1; /* post with "channel end" */
unsigned int repall:1; /* report every interrupt status */
unsigned int pgroup:1; /* do path grouping */
unsigned int force:1; /* allow forced online */
} __attribute__ ((packed)) options;
struct {
unsigned int pgid_single:1; /* use single path for Set PGID */
unsigned int esid:1; /* Ext. SenseID supported by HW */
unsigned int dosense:1; /* delayed SENSE required */
unsigned int doverify:1; /* delayed path verification */
unsigned int donotify:1; /* call notify function */
unsigned int recog_done:1; /* dev. recog. complete */
} __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
struct qdio_irq *qdio_data;
struct irb irb; /* device status */
struct senseid senseid; /* SenseID info */
struct pgid pgid; /* path group ID */
struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
struct work_struct kick_work;
wait_queue_head_t wait_q;
struct timer_list timer;
void *cmb; /* measurement information */
struct list_head cmb_list; /* list of measured devices */
u64 cmb_start_time; /* clock value of cmb reset */
void *cmb_wait; /* deferred cmb enable/disable */
};
/*
 * A css driver handles all subchannels of one type.
 * Currently, we only care about I/O subchannels (type 0); these
 * have a ccw_device connected to them.
 */
struct css_driver {
unsigned int subchannel_type;
struct device_driver drv;
void (*irq)(struct device *);
int (*notify)(struct device *, int);
void (*verify)(struct device *);
void (*termination)(struct device *);
};
/*
* all css_drivers have the css_bus_type
*/
extern struct bus_type css_bus_type;
extern struct css_driver io_subchannel_driver;
int css_probe_device(int irq);
extern struct subchannel * get_subchannel_by_schid(int irq);
extern unsigned int highest_subchannel;
extern int css_init_done;
#define __MAX_SUBCHANNELS 65536
extern struct device css_bus_device;
/* Some helper functions for disconnected state. */
int device_is_disconnected(struct subchannel *);
void device_set_disconnected(struct subchannel *);
void device_trigger_reprobe(struct subchannel *);
/* Helper functions for vary on/off. */
void device_set_waiting(struct subchannel *);
/* Machine check helper function. */
void device_kill_pending_timer(struct subchannel *);
/* Helper functions to build lists for the slow path. */
int css_enqueue_subchannel_slow(unsigned long schid);
void css_walk_subchannel_slow_list(void (*fn)(unsigned long));
void css_clear_subchannel_slow_list(void);
int css_slow_subchannels_exist(void);
extern int need_rescan;
extern struct workqueue_struct *slow_path_wq;
extern struct work_struct slow_path_work;
#endif

File diff suppressed because it is too large

View File

@@ -0,0 +1,115 @@
#ifndef S390_DEVICE_H
#define S390_DEVICE_H
/*
 * states of the device state machine
 */
enum dev_state {
DEV_STATE_NOT_OPER,
DEV_STATE_SENSE_PGID,
DEV_STATE_SENSE_ID,
DEV_STATE_OFFLINE,
DEV_STATE_VERIFY,
DEV_STATE_ONLINE,
DEV_STATE_W4SENSE,
DEV_STATE_DISBAND_PGID,
DEV_STATE_BOXED,
/* states to wait for i/o completion before doing something */
DEV_STATE_CLEAR_VERIFY,
DEV_STATE_TIMEOUT_KILL,
DEV_STATE_WAIT4IO,
DEV_STATE_QUIESCE,
/* special states for devices gone not operational */
DEV_STATE_DISCONNECTED,
DEV_STATE_DISCONNECTED_SENSE_ID,
DEV_STATE_CMFCHANGE,
/* last element! */
NR_DEV_STATES
};
/*
 * asynchronous events of the device state machine
 */
enum dev_event {
DEV_EVENT_NOTOPER,
DEV_EVENT_INTERRUPT,
DEV_EVENT_TIMEOUT,
DEV_EVENT_VERIFY,
/* last element! */
NR_DEV_EVENTS
};
struct ccw_device;
/*
* action called through jumptable
*/
typedef void (fsm_func_t)(struct ccw_device *, enum dev_event);
extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];
static inline void
dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
{
dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
}
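/*
 * Dispatch example (sketch): an interrupt for a device that is in
 * DEV_STATE_SENSE_ID is delivered as
 *	dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
 * and the jumptable (defined in device_fsm.c, not shown here) routes
 * it to ccw_device_sense_id_irq().
 */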
/*
 * Returns 1 if the device state is final.
 */
static inline int
dev_fsm_final_state(struct ccw_device *cdev)
{
return (cdev->private->state == DEV_STATE_NOT_OPER ||
cdev->private->state == DEV_STATE_OFFLINE ||
cdev->private->state == DEV_STATE_ONLINE ||
cdev->private->state == DEV_STATE_BOXED);
}
extern struct workqueue_struct *ccw_device_work;
extern struct workqueue_struct *ccw_device_notify_work;
void io_subchannel_recog_done(struct ccw_device *cdev);
int ccw_device_cancel_halt_clear(struct ccw_device *);
int ccw_device_register(struct ccw_device *);
void ccw_device_do_unreg_rereg(void *);
void ccw_device_call_sch_unregister(void *);
int ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
int ccw_device_offline(struct ccw_device *);
/* Function prototypes for device status and basic sense stuff. */
void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
int ccw_device_do_sense(struct ccw_device *, struct irb *);
/* Function prototypes for sense id stuff. */
void ccw_device_sense_id_start(struct ccw_device *);
void ccw_device_sense_id_irq(struct ccw_device *, enum dev_event);
void ccw_device_sense_id_done(struct ccw_device *, int);
/* Function prototypes for path grouping stuff. */
void ccw_device_sense_pgid_start(struct ccw_device *);
void ccw_device_sense_pgid_irq(struct ccw_device *, enum dev_event);
void ccw_device_sense_pgid_done(struct ccw_device *, int);
void ccw_device_verify_start(struct ccw_device *);
void ccw_device_verify_irq(struct ccw_device *, enum dev_event);
void ccw_device_verify_done(struct ccw_device *, int);
void ccw_device_disband_start(struct ccw_device *);
void ccw_device_disband_irq(struct ccw_device *, enum dev_event);
void ccw_device_disband_done(struct ccw_device *, int);
int ccw_device_call_handler(struct ccw_device *);
int ccw_device_stlck(struct ccw_device *);
/* qdio needs this. */
void ccw_device_set_timeout(struct ccw_device *, int);
void retry_set_schib(struct ccw_device *cdev);
#endif

File diff suppressed because it is too large

View File

@@ -0,0 +1,354 @@
/*
* drivers/s390/cio/device_id.c
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Author(s): Cornelia Huck(cohuck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Sense ID functions.
*/
#include <linux/module.h>
#include <linux/config.h>
#include <linux/init.h>
#include <asm/ccwdev.h>
#include <asm/delay.h>
#include <asm/cio.h>
#include <asm/lowcore.h>
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
/*
* diag210 is used under VM to get information about a virtual device
*/
#ifdef CONFIG_ARCH_S390X
int
diag210(struct diag210 * addr)
{
/*
* diag 210 needs its data below the 2GB border, so we
* use a static data area to be sure
*/
static struct diag210 diag210_tmp;
static spinlock_t diag210_lock = SPIN_LOCK_UNLOCKED;
unsigned long flags;
int ccode;
spin_lock_irqsave(&diag210_lock, flags);
diag210_tmp = *addr;
asm volatile (
" lhi %0,-1\n"
" sam31\n"
" diag %1,0,0x210\n"
"0: ipm %0\n"
" srl %0,28\n"
"1: sam64\n"
".section __ex_table,\"a\"\n"
" .align 8\n"
" .quad 0b,1b\n"
".previous"
: "=&d" (ccode) : "a" (__pa(&diag210_tmp)) : "cc", "memory" );
*addr = diag210_tmp;
spin_unlock_irqrestore(&diag210_lock, flags);
return ccode;
}
#else
int
diag210(struct diag210 * addr)
{
int ccode;
asm volatile (
" lhi %0,-1\n"
" diag %1,0,0x210\n"
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
".section __ex_table,\"a\"\n"
" .align 4\n"
" .long 0b,1b\n"
".previous"
: "=&d" (ccode) : "a" (__pa(addr)) : "cc", "memory" );
return ccode;
}
#endif
/*
* Input :
* devno - device number
* ps - pointer to sense ID data area
* Output : none
*/
static void
VM_virtual_device_info (__u16 devno, struct senseid *ps)
{
static struct {
int vrdcvcla, vrdcvtyp, cu_type;
} vm_devices[] = {
{ 0x08, 0x01, 0x3480 },
{ 0x08, 0x02, 0x3430 },
{ 0x08, 0x10, 0x3420 },
{ 0x08, 0x42, 0x3424 },
{ 0x08, 0x44, 0x9348 },
{ 0x08, 0x81, 0x3490 },
{ 0x08, 0x82, 0x3422 },
{ 0x10, 0x41, 0x1403 },
{ 0x10, 0x42, 0x3211 },
{ 0x10, 0x43, 0x3203 },
{ 0x10, 0x45, 0x3800 },
{ 0x10, 0x47, 0x3262 },
{ 0x10, 0x48, 0x3820 },
{ 0x10, 0x49, 0x3800 },
{ 0x10, 0x4a, 0x4245 },
{ 0x10, 0x4b, 0x4248 },
{ 0x10, 0x4d, 0x3800 },
{ 0x10, 0x4e, 0x3820 },
{ 0x10, 0x4f, 0x3820 },
{ 0x10, 0x82, 0x2540 },
{ 0x10, 0x84, 0x3525 },
{ 0x20, 0x81, 0x2501 },
{ 0x20, 0x82, 0x2540 },
{ 0x20, 0x84, 0x3505 },
{ 0x40, 0x01, 0x3278 },
{ 0x40, 0x04, 0x3277 },
{ 0x40, 0x80, 0x2250 },
{ 0x40, 0xc0, 0x5080 },
{ 0x80, 0x00, 0x3215 },
};
struct diag210 diag_data;
int ccode, i;
CIO_TRACE_EVENT (4, "VMvdinf");
diag_data = (struct diag210) {
.vrdcdvno = devno,
.vrdclen = sizeof (diag_data),
};
ccode = diag210 (&diag_data);
ps->reserved = 0xff;
/* Special case for bloody osa devices. */
if (diag_data.vrdcvcla == 0x02 &&
diag_data.vrdcvtyp == 0x20) {
ps->cu_type = 0x3088;
ps->cu_model = 0x60;
return;
}
for (i = 0; i < sizeof(vm_devices) / sizeof(vm_devices[0]); i++)
if (diag_data.vrdcvcla == vm_devices[i].vrdcvcla &&
diag_data.vrdcvtyp == vm_devices[i].vrdcvtyp) {
ps->cu_type = vm_devices[i].cu_type;
return;
}
CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d): "
"vdev class : %02X, vdev type : %04X\n ... "
"rdev class : %02X, rdev type : %04X, "
"rdev model: %02X\n",
devno, ccode,
diag_data.vrdcvcla, diag_data.vrdcvtyp,
diag_data.vrdcrccl, diag_data.vrdccrty,
diag_data.vrdccrmd);
}
/*
* Start Sense ID helper function.
* Try to obtain the 'control unit'/'device type' information
* associated with the subchannel.
*/
static int
__ccw_device_sense_id_start(struct ccw_device *cdev)
{
struct subchannel *sch;
struct ccw1 *ccw;
int ret;
sch = to_subchannel(cdev->dev.parent);
/* Setup sense channel program. */
ccw = cdev->private->iccws;
if (sch->schib.pmcw.pim != 0x80) {
/* more than one path installed. */
ccw->cmd_code = CCW_CMD_SUSPEND_RECONN;
ccw->cda = 0;
ccw->count = 0;
ccw->flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ccw++;
}
ccw->cmd_code = CCW_CMD_SENSE_ID;
ccw->cda = (__u32) __pa (&cdev->private->senseid);
ccw->count = sizeof (struct senseid);
ccw->flags = CCW_FLAG_SLI;
/* Reset device status. */
memset(&cdev->private->irb, 0, sizeof(struct irb));
/* Try on every path. */
ret = -ENODEV;
while (cdev->private->imask != 0) {
if ((sch->opm & cdev->private->imask) != 0 &&
cdev->private->iretry > 0) {
cdev->private->iretry--;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* ret is 0, -EBUSY, -EACCES or -ENODEV */
if (ret != -EACCES)
return ret;
}
cdev->private->imask >>= 1;
cdev->private->iretry = 5;
}
return ret;
}
void
ccw_device_sense_id_start(struct ccw_device *cdev)
{
int ret;
memset (&cdev->private->senseid, 0, sizeof (struct senseid));
cdev->private->senseid.cu_type = 0xFFFF;
cdev->private->imask = 0x80;
cdev->private->iretry = 5;
ret = __ccw_device_sense_id_start(cdev);
if (ret && ret != -EBUSY)
ccw_device_sense_id_done(cdev, ret);
}
/*
* Called from interrupt context to check if a valid answer
* to Sense ID was received.
*/
static int
ccw_device_check_sense_id(struct ccw_device *cdev)
{
struct subchannel *sch;
struct irb *irb;
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
/* Did we get a proper answer ? */
if (cdev->private->senseid.cu_type != 0xFFFF &&
cdev->private->senseid.reserved == 0xFF) {
if (irb->scsw.count < sizeof (struct senseid) - 8)
cdev->private->flags.esid = 1;
return 0; /* Success */
}
/* Check the error cases. */
if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
return -ETIME;
if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) {
/*
 * If the device doesn't support the SenseID command,
 * further retries won't help ...
 * NB: We don't check here for intervention required like we
 * did before, because tape devices with no tape inserted
 * may present this status *in conjunction with* the
 * sense id information. So, for intervention required,
 * we use the "whack it until it talks" strategy...
 */
CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel %04x "
"reports cmd reject\n",
cdev->private->devno, sch->irq);
return -EOPNOTSUPP;
}
if (irb->esw.esw0.erw.cons) {
CIO_MSG_EVENT(2, "SenseID : UC on dev %04x, "
"lpum %02X, cnt %02d, sns :"
" %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
cdev->private->devno,
irb->esw.esw0.sublog.lpum,
irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
irb->ecw[2], irb->ecw[3],
irb->ecw[4], irb->ecw[5],
irb->ecw[6], irb->ecw[7]);
return -EAGAIN;
}
if (irb->scsw.cc == 3) {
if ((sch->orb.lpm &
sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x on"
" subchannel %04x is 'not operational'\n",
sch->orb.lpm, cdev->private->devno,
sch->irq);
return -EACCES;
}
/* Hmm, whatever happened, try again. */
CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
"subchannel %04x returns status %02X%02X\n",
cdev->private->devno, sch->irq,
irb->scsw.dstat, irb->scsw.cstat);
return -EAGAIN;
}
/*
* Got interrupt for Sense ID.
*/
void
ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
struct irb *irb;
int ret;
sch = to_subchannel(cdev->dev.parent);
irb = (struct irb *) __LC_IRB;
/* Retry sense id, if needed. */
if (irb->scsw.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if ((irb->scsw.cc == 1) || !irb->scsw.actl) {
ret = __ccw_device_sense_id_start(cdev);
if (ret && ret != -EBUSY)
ccw_device_sense_id_done(cdev, ret);
}
return;
}
if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
return;
ret = ccw_device_check_sense_id(cdev);
switch (ret) {
/* 0, -ETIME, -EOPNOTSUPP, -EAGAIN or -EACCES */
case 0: /* Sense id succeeded. */
case -ETIME: /* Sense id stopped by timeout. */
ccw_device_sense_id_done(cdev, ret);
break;
case -EACCES: /* channel is not operational. */
sch->lpm &= ~cdev->private->imask;
cdev->private->imask >>= 1;
cdev->private->iretry = 5;
/* fall through. */
case -EAGAIN: /* try again. */
ret = __ccw_device_sense_id_start(cdev);
if (ret == 0 || ret == -EBUSY)
break;
/* fall through. */
default: /* Sense ID failed. Try asking VM. */
if (MACHINE_IS_VM) {
VM_virtual_device_info (cdev->private->devno,
&cdev->private->senseid);
if (cdev->private->senseid.cu_type != 0xFFFF) {
/* Got the device information from VM. */
ccw_device_sense_id_done(cdev, 0);
return;
}
}
/*
 * If we couldn't identify the device type, we
 * consider the device "not operational".
 */
ccw_device_sense_id_done(cdev, -ENODEV);
break;
}
}
EXPORT_SYMBOL(diag210);

View File

@@ -0,0 +1,547 @@
/*
* drivers/s390/cio/device_ops.c
*
* $Revision: 1.50 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
* Cornelia Huck (cohuck@de.ibm.com)
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <asm/ccwdev.h>
#include <asm/idals.h>
#include <asm/qdio.h>
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
int
ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
/*
 * The flag usage is mutually exclusive ...
 */
if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
(flags & CCWDEV_REPORT_ALL))
return -EINVAL;
cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
return 0;
}
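/*
 * Usage sketch: a driver that wants path grouping and early ("fast")
 * interrupt notification would call, before setting the device online:
 *	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
 *				     CCWDEV_EARLY_NOTIFICATION);
 * Combining CCWDEV_EARLY_NOTIFICATION with CCWDEV_REPORT_ALL yields
 * -EINVAL, since the two reporting modes are mutually exclusive.
 */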
int
ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
struct subchannel *sch;
int ret;
if (!cdev)
return -ENODEV;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE &&
cdev->private->state != DEV_STATE_W4SENSE)
return -EINVAL;
sch = to_subchannel(cdev->dev.parent);
if (!sch)
return -ENODEV;
ret = cio_clear(sch);
if (ret == 0)
cdev->private->intparm = intparm;
return ret;
}
int
ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
unsigned long intparm, __u8 lpm, unsigned long flags)
{
struct subchannel *sch;
int ret;
if (!cdev)
return -ENODEV;
sch = to_subchannel(cdev->dev.parent);
if (!sch)
return -ENODEV;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE ||
((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
!(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
cdev->private->flags.doverify)
return -EBUSY;
ret = cio_set_options (sch, flags);
if (ret)
return ret;
ret = cio_start (sch, cpa, lpm);
if (ret == 0)
cdev->private->intparm = intparm;
return ret;
}
int
ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
unsigned long intparm, __u8 lpm, unsigned long flags,
int expires)
{
int ret;
if (!cdev)
return -ENODEV;
ccw_device_set_timeout(cdev, expires);
ret = ccw_device_start(cdev, cpa, intparm, lpm, flags);
if (ret != 0)
ccw_device_set_timeout(cdev, 0);
return ret;
}
int
ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
struct subchannel *sch;
int ret;
if (!cdev)
return -ENODEV;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE &&
cdev->private->state != DEV_STATE_W4SENSE)
return -EINVAL;
sch = to_subchannel(cdev->dev.parent);
if (!sch)
return -ENODEV;
ret = cio_halt(sch);
if (ret == 0)
cdev->private->intparm = intparm;
return ret;
}
int
ccw_device_resume(struct ccw_device *cdev)
{
struct subchannel *sch;
if (!cdev)
return -ENODEV;
sch = to_subchannel(cdev->dev.parent);
if (!sch)
return -ENODEV;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE ||
!(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
return -EINVAL;
return cio_resume(sch);
}
/*
* Pass interrupt to device driver.
*/
int
ccw_device_call_handler(struct ccw_device *cdev)
{
struct subchannel *sch;
unsigned int stctl;
int ending_status;
sch = to_subchannel(cdev->dev.parent);
/*
 * We call the device driver's interrupt handler if
 * - we received ending status,
 * - the action handler requested to see all interrupts,
 * - we received an intermediate status,
 * - fast notification was requested (primary status), or
 * - the interrupt is unsolicited.
 */
stctl = cdev->private->irb.scsw.stctl;
ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
(stctl == SCSW_STCTL_STATUS_PEND);
if (!ending_status &&
!cdev->private->options.repall &&
!(stctl & SCSW_STCTL_INTER_STATUS) &&
!(cdev->private->options.fast &&
(stctl & SCSW_STCTL_PRIM_STATUS)))
return 0;
/*
* Now we are ready to call the device driver interrupt handler.
*/
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
&cdev->private->irb);
/*
* Clear the old and now useless interrupt response block.
*/
memset(&cdev->private->irb, 0, sizeof(struct irb));
return 1;
}
/*
* Search for CIW command in extended sense data.
*/
struct ciw *
ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
int ciw_cnt;
if (cdev->private->flags.esid == 0)
return NULL;
for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
return cdev->private->senseid.ciw + ciw_cnt;
return NULL;
}
__u8
ccw_device_get_path_mask(struct ccw_device *cdev)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
if (!sch)
return 0;
else
return sch->vpm;
}
static void
ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
{
if (!ip)
/* unsolicited interrupt */
return;
/* Abuse intparm for error reporting. */
if (IS_ERR(irb))
cdev->private->intparm = -EIO;
else if ((irb->scsw.dstat !=
(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
(irb->scsw.cstat != 0)) {
/*
* We didn't get channel end / device end. Check if path
* verification has been started; we can retry after it has
* finished. We also retry unit checks except for command reject
* or intervention required.
*/
if (cdev->private->flags.doverify ||
cdev->private->state == DEV_STATE_VERIFY)
cdev->private->intparm = -EAGAIN;
else if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
!(irb->ecw[0] &
(SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
cdev->private->intparm = -EAGAIN;
else
cdev->private->intparm = -EIO;
} else
cdev->private->intparm = 0;
wake_up(&cdev->private->wait_q);
}
static inline int
__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic)
{
int ret;
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
do {
ret = cio_start (sch, ccw, 0);
if ((ret == -EBUSY) || (ret == -EACCES)) {
/* Try again later. */
spin_unlock_irq(&sch->lock);
msleep(10);
spin_lock_irq(&sch->lock);
continue;
}
if (ret != 0)
/* Non-retryable error. */
break;
/* Wait for end of request. */
cdev->private->intparm = magic;
spin_unlock_irq(&sch->lock);
wait_event(cdev->private->wait_q,
(cdev->private->intparm == -EIO) ||
(cdev->private->intparm == -EAGAIN) ||
(cdev->private->intparm == 0));
spin_lock_irq(&sch->lock);
/* Check at least for channel end / device end */
if (cdev->private->intparm == -EIO) {
/* Non-retryable error. */
ret = -EIO;
break;
}
if (cdev->private->intparm == 0)
/* Success. */
break;
/* Try again later. */
spin_unlock_irq(&sch->lock);
msleep(10);
spin_lock_irq(&sch->lock);
} while (1);
return ret;
}
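/*
 * Protocol note (summarizing the code above): __ccw_device_retry_loop()
 * parks the request with intparm set to a magic cookie and sleeps;
 * ccw_device_wake_up() rewrites intparm to 0 (success), -EAGAIN (retry)
 * or -EIO (give up) before waking the waiter, so the loop knows whether
 * to resubmit.
 */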
/**
 * read_dev_chars() - read device characteristics
 * @cdev: target ccw device
 * @buffer: pointer to buffer for rdc data
 * @length: size of rdc data
 *
 * Returns 0 for success, negative error value on failure.
 *
 * Context:
 *   called for online device, lock not held
 **/
int
read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
{
void (*handler)(struct ccw_device *, unsigned long, struct irb *);
struct subchannel *sch;
int ret;
struct ccw1 *rdc_ccw;
if (!cdev)
return -ENODEV;
if (!buffer || !length)
return -EINVAL;
sch = to_subchannel(cdev->dev.parent);
CIO_TRACE_EVENT (4, "rddevch");
CIO_TRACE_EVENT (4, sch->dev.bus_id);
rdc_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
if (!rdc_ccw)
return -ENOMEM;
memset(rdc_ccw, 0, sizeof(struct ccw1));
rdc_ccw->cmd_code = CCW_CMD_RDC;
rdc_ccw->count = length;
rdc_ccw->flags = CCW_FLAG_SLI;
ret = set_normalized_cda (rdc_ccw, (*buffer));
if (ret != 0) {
kfree(rdc_ccw);
return ret;
}
spin_lock_irq(&sch->lock);
/* Save interrupt handler. */
handler = cdev->handler;
/* Temporarily install own handler. */
cdev->handler = ccw_device_wake_up;
if (cdev->private->state != DEV_STATE_ONLINE)
ret = -ENODEV;
else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
!(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
cdev->private->flags.doverify)
ret = -EBUSY;
else
/* 0x00D9C4C3 == ebcdic "RDC" */
ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3);
/* Restore interrupt handler. */
cdev->handler = handler;
spin_unlock_irq(&sch->lock);
clear_normalized_cda (rdc_ccw);
kfree(rdc_ccw);
return ret;
}
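/*
 * Note on the magic cookies: they are the command mnemonics in EBCDIC
 * ('R' = 0xD9, 'C' = 0xC3, 'D' = 0xC4), hence 0x00D9C4C3 for "RDC"
 * here and 0x00D9C3C4 for "RCD" in read_conf_data() below.
 */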
/*
* Read Configuration data
*/
int
read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
{
void (*handler)(struct ccw_device *, unsigned long, struct irb *);
struct subchannel *sch;
struct ciw *ciw;
char *rcd_buf;
int ret;
struct ccw1 *rcd_ccw;
if (!cdev)
return -ENODEV;
if (!buffer || !length)
return -EINVAL;
sch = to_subchannel(cdev->dev.parent);
CIO_TRACE_EVENT (4, "rdconf");
CIO_TRACE_EVENT (4, sch->dev.bus_id);
/*
* scan for RCD command in extended SenseID data
*/
ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
if (!ciw || ciw->cmd == 0)
return -EOPNOTSUPP;
rcd_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
if (!rcd_ccw)
return -ENOMEM;
memset(rcd_ccw, 0, sizeof(struct ccw1));
rcd_buf = kmalloc(ciw->count, GFP_KERNEL | GFP_DMA);
if (!rcd_buf) {
kfree(rcd_ccw);
return -ENOMEM;
}
memset (rcd_buf, 0, ciw->count);
rcd_ccw->cmd_code = ciw->cmd;
rcd_ccw->cda = (__u32) __pa (rcd_buf);
rcd_ccw->count = ciw->count;
rcd_ccw->flags = CCW_FLAG_SLI;
spin_lock_irq(&sch->lock);
/* Save interrupt handler. */
handler = cdev->handler;
/* Temporarily install own handler. */
cdev->handler = ccw_device_wake_up;
if (cdev->private->state != DEV_STATE_ONLINE)
ret = -ENODEV;
else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
!(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
cdev->private->flags.doverify)
ret = -EBUSY;
else
/* 0x00D9C3C4 == ebcdic "RCD" */
ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4);
/* Restore interrupt handler. */
cdev->handler = handler;
spin_unlock_irq(&sch->lock);
/*
* on success we update the user input parms
*/
if (ret) {
kfree (rcd_buf);
*buffer = NULL;
*length = 0;
} else {
*length = ciw->count;
*buffer = rcd_buf;
}
kfree(rcd_ccw);
return ret;
}
/*
* Try to break the lock on a boxed device.
*/
int
ccw_device_stlck(struct ccw_device *cdev)
{
void *buf, *buf2;
unsigned long flags;
struct subchannel *sch;
int ret;
if (!cdev)
return -ENODEV;
if (cdev->drv && !cdev->private->options.force)
return -EINVAL;
sch = to_subchannel(cdev->dev.parent);
CIO_TRACE_EVENT(2, "stl lock");
CIO_TRACE_EVENT(2, cdev->dev.bus_id);
buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
if (!buf2) {
kfree(buf);
return -ENOMEM;
}
spin_lock_irqsave(&sch->lock, flags);
ret = cio_enable_subchannel(sch, 3);
if (ret)
goto out_unlock;
/*
* Setup ccw. We chain an unconditional reserve and a release so we
* only break the lock.
*/
cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
cdev->private->iccws[0].cda = (__u32) __pa(buf);
cdev->private->iccws[0].count = 32;
cdev->private->iccws[0].flags = CCW_FLAG_CC;
cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
cdev->private->iccws[1].cda = (__u32) __pa(buf2);
cdev->private->iccws[1].count = 32;
cdev->private->iccws[1].flags = 0;
ret = cio_start(sch, cdev->private->iccws, 0);
if (ret) {
cio_disable_subchannel(sch); //FIXME: return code?
goto out_unlock;
}
cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
spin_unlock_irqrestore(&sch->lock, flags);
wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
spin_lock_irqsave(&sch->lock, flags);
cio_disable_subchannel(sch); //FIXME: return code?
if ((cdev->private->irb.scsw.dstat !=
(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
(cdev->private->irb.scsw.cstat != 0))
ret = -EIO;
/* Clear irb. */
memset(&cdev->private->irb, 0, sizeof(struct irb));
out_unlock:
kfree(buf);
kfree(buf2);
spin_unlock_irqrestore(&sch->lock, flags);
return ret;
}
// FIXME: these have to go:
int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
return cdev->private->irq;
}
int
_ccw_device_get_device_number(struct ccw_device *cdev)
{
return cdev->private->devno;
}
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL(read_conf_data);
EXPORT_SYMBOL(read_dev_chars);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL(_ccw_device_get_device_number);

View File

@@ -0,0 +1,442 @@
/*
* drivers/s390/cio/device_pgid.c
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Author(s): Cornelia Huck(cohuck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Path Group ID functions.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/delay.h>
#include <asm/lowcore.h>
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
/*
* Start Sense Path Group ID helper function. Used in ccw_device_recog
* and ccw_device_sense_pgid.
*/
static int
__ccw_device_sense_pgid_start(struct ccw_device *cdev)
{
struct subchannel *sch;
struct ccw1 *ccw;
int ret;
sch = to_subchannel(cdev->dev.parent);
/* Setup sense path group id channel program. */
ccw = cdev->private->iccws;
ccw->cmd_code = CCW_CMD_SENSE_PGID;
ccw->cda = (__u32) __pa (&cdev->private->pgid);
ccw->count = sizeof (struct pgid);
ccw->flags = CCW_FLAG_SLI;
/* Reset device status. */
memset(&cdev->private->irb, 0, sizeof(struct irb));
/* Try on every path. */
ret = -ENODEV;
while (cdev->private->imask != 0) {
/* Try every path multiple times. */
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* ret is 0, -EBUSY, -EACCES or -ENODEV */
if (ret != -EACCES)
return ret;
CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
"%04x, lpm %02X, became 'not "
"operational'\n",
cdev->private->devno, sch->irq,
cdev->private->imask);
}
cdev->private->imask >>= 1;
cdev->private->iretry = 5;
}
return ret;
}
void
ccw_device_sense_pgid_start(struct ccw_device *cdev)
{
int ret;
cdev->private->state = DEV_STATE_SENSE_PGID;
cdev->private->imask = 0x80;
cdev->private->iretry = 5;
memset (&cdev->private->pgid, 0, sizeof (struct pgid));
ret = __ccw_device_sense_pgid_start(cdev);
if (ret && ret != -EBUSY)
ccw_device_sense_pgid_done(cdev, ret);
}
/*
* Called from interrupt context to check if a valid answer
* to Sense Path Group ID was received.
*/
static int
__ccw_device_check_sense_pgid(struct ccw_device *cdev)
{
struct subchannel *sch;
struct irb *irb;
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
return -ETIME;
if (irb->esw.esw0.erw.cons &&
(irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) {
/*
* If the device doesn't support the Sense Path Group ID
* command further retries wouldn't help ...
*/
return -EOPNOTSUPP;
}
if (irb->esw.esw0.erw.cons) {
CIO_MSG_EVENT(2, "SNID - device %04x, unit check, "
"lpum %02X, cnt %02d, sns : "
"%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
cdev->private->devno,
irb->esw.esw0.sublog.lpum,
irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
irb->ecw[2], irb->ecw[3],
irb->ecw[4], irb->ecw[5],
irb->ecw[6], irb->ecw[7]);
return -EAGAIN;
}
if (irb->scsw.cc == 3) {
CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
"%04x, lpm %02X, became 'not operational'\n",
cdev->private->devno, sch->irq, sch->orb.lpm);
return -EACCES;
}
if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel %04x "
"is reserved by someone else\n",
cdev->private->devno, sch->irq);
return -EUSERS;
}
return 0;
}
/*
* Got interrupt for Sense Path Group ID.
*/
void
ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
struct irb *irb;
int ret;
irb = (struct irb *) __LC_IRB;
/* Retry sense pgid for cc=1. */
if (irb->scsw.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if (irb->scsw.cc == 1) {
ret = __ccw_device_sense_pgid_start(cdev);
if (ret && ret != -EBUSY)
ccw_device_sense_pgid_done(cdev, ret);
}
return;
}
if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
return;
sch = to_subchannel(cdev->dev.parent);
switch (__ccw_device_check_sense_pgid(cdev)) {
/* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
case 0: /* Sense Path Group ID successful. */
if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET)
memcpy(&cdev->private->pgid, &global_pgid,
sizeof(struct pgid));
ccw_device_sense_pgid_done(cdev, 0);
break;
case -EOPNOTSUPP: /* Sense Path Group ID not supported */
ccw_device_sense_pgid_done(cdev, -EOPNOTSUPP);
break;
case -ETIME: /* Sense path group id stopped by timeout. */
ccw_device_sense_pgid_done(cdev, -ETIME);
break;
case -EACCES: /* channel is not operational. */
sch->lpm &= ~cdev->private->imask;
cdev->private->imask >>= 1;
cdev->private->iretry = 5;
/* Fall through. */
case -EAGAIN: /* Try again. */
ret = __ccw_device_sense_pgid_start(cdev);
if (ret != 0 && ret != -EBUSY)
ccw_device_sense_pgid_done(cdev, -ENODEV);
break;
case -EUSERS: /* device is reserved for someone else. */
ccw_device_sense_pgid_done(cdev, -EUSERS);
break;
}
}
/*
* Path Group ID helper function.
*/
static int
__ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
{
struct subchannel *sch;
struct ccw1 *ccw;
int ret;
sch = to_subchannel(cdev->dev.parent);
/* Setup sense path group id channel program. */
cdev->private->pgid.inf.fc = func;
ccw = cdev->private->iccws;
if (!cdev->private->flags.pgid_single) {
cdev->private->pgid.inf.fc |= SPID_FUNC_MULTI_PATH;
ccw->cmd_code = CCW_CMD_SUSPEND_RECONN;
ccw->cda = 0;
ccw->count = 0;
ccw->flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ccw++;
} else
cdev->private->pgid.inf.fc |= SPID_FUNC_SINGLE_PATH;
ccw->cmd_code = CCW_CMD_SET_PGID;
ccw->cda = (__u32) __pa (&cdev->private->pgid);
ccw->count = sizeof (struct pgid);
ccw->flags = CCW_FLAG_SLI;
/* Reset device status. */
memset(&cdev->private->irb, 0, sizeof(struct irb));
/* Try multiple times. */
ret = -ENODEV;
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
/* ret is 0, -EBUSY, -EACCES or -ENODEV */
if ((ret != -EACCES) && (ret != -ENODEV))
return ret;
}
/* PGID command failed on this path. Switch it off. */
sch->lpm &= ~cdev->private->imask;
sch->vpm &= ~cdev->private->imask;
CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
"%04x, lpm %02X, became 'not operational'\n",
cdev->private->devno, sch->irq, cdev->private->imask);
return ret;
}
/*
* Called from interrupt context to check if a valid answer
* to Set Path Group ID was received.
*/
static int
__ccw_device_check_pgid(struct ccw_device *cdev)
{
struct subchannel *sch;
struct irb *irb;
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
return -ETIME;
if (irb->esw.esw0.erw.cons) {
if (irb->ecw[0] & SNS0_CMD_REJECT)
return -EOPNOTSUPP;
/* Hmm, whatever happened, try again. */
CIO_MSG_EVENT(2, "SPID - device %04x, unit check, cnt %02d, "
"sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
cdev->private->devno, irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
irb->ecw[2], irb->ecw[3],
irb->ecw[4], irb->ecw[5],
irb->ecw[6], irb->ecw[7]);
return -EAGAIN;
}
if (irb->scsw.cc == 3) {
CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
"%04x, lpm %02X, became 'not operational'\n",
cdev->private->devno, sch->irq,
cdev->private->imask);
return -EACCES;
}
return 0;
}
static void
__ccw_device_verify_start(struct ccw_device *cdev)
{
struct subchannel *sch;
__u8 imask, func;
int ret;
sch = to_subchannel(cdev->dev.parent);
while (sch->vpm != sch->lpm) {
/* Find first unequal bit in vpm vs. lpm */
for (imask = 0x80; imask != 0; imask >>= 1)
if ((sch->vpm & imask) != (sch->lpm & imask))
break;
cdev->private->imask = imask;
func = (sch->vpm & imask) ?
SPID_FUNC_RESIGN : SPID_FUNC_ESTABLISH;
ret = __ccw_device_do_pgid(cdev, func);
if (ret == 0 || ret == -EBUSY)
return;
cdev->private->iretry = 5;
}
ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
}
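/*
 * Worked example (sketch): with lpm == 0xC0 and vpm == 0x80 the first
 * differing bit is 0x40; it is set in lpm but not in vpm, so an
 * SPID_FUNC_ESTABLISH is issued on that path. When it completes, the
 * interrupt handler sets vpm |= 0x40 and the loop ends with vpm == lpm.
 */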
/*
* Got interrupt for Set Path Group ID.
*/
void
ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
struct irb *irb;
irb = (struct irb *) __LC_IRB;
/* Retry set pgid for cc=1. */
if (irb->scsw.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if (irb->scsw.cc == 1)
__ccw_device_verify_start(cdev);
return;
}
if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
return;
sch = to_subchannel(cdev->dev.parent);
switch (__ccw_device_check_pgid(cdev)) {
/* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
case 0:
/* Establish or Resign Path Group done. Update vpm. */
if ((sch->lpm & cdev->private->imask) != 0)
sch->vpm |= cdev->private->imask;
else
sch->vpm &= ~cdev->private->imask;
cdev->private->iretry = 5;
__ccw_device_verify_start(cdev);
break;
case -EOPNOTSUPP:
/*
* One of those strange devices which claim to be able
* to do multipathing but not for Set Path Group ID.
*/
if (cdev->private->flags.pgid_single) {
ccw_device_verify_done(cdev, -EOPNOTSUPP);
break;
}
cdev->private->flags.pgid_single = 1;
/* fall through. */
case -EAGAIN: /* Try again. */
__ccw_device_verify_start(cdev);
break;
case -ETIME: /* Set path group id stopped by timeout. */
ccw_device_verify_done(cdev, -ETIME);
break;
case -EACCES: /* channel is not operational. */
sch->lpm &= ~cdev->private->imask;
sch->vpm &= ~cdev->private->imask;
cdev->private->iretry = 5;
__ccw_device_verify_start(cdev);
break;
}
}
void
ccw_device_verify_start(struct ccw_device *cdev)
{
cdev->private->flags.pgid_single = 0;
cdev->private->iretry = 5;
__ccw_device_verify_start(cdev);
}
static void
__ccw_device_disband_start(struct ccw_device *cdev)
{
struct subchannel *sch;
int ret;
sch = to_subchannel(cdev->dev.parent);
while (cdev->private->imask != 0) {
if (sch->lpm & cdev->private->imask) {
ret = __ccw_device_do_pgid(cdev, SPID_FUNC_DISBAND);
if (ret == 0)
return;
}
cdev->private->iretry = 5;
cdev->private->imask >>= 1;
}
ccw_device_disband_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
}
/*
* Got interrupt for Unset Path Group ID.
*/
void
ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
struct irb *irb;
int ret;
irb = (struct irb *) __LC_IRB;
/* Retry disband for cc=1. */
if (irb->scsw.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if (irb->scsw.cc == 1)
__ccw_device_disband_start(cdev);
return;
}
if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
return;
sch = to_subchannel(cdev->dev.parent);
ret = __ccw_device_check_pgid(cdev);
switch (ret) {
/* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
case 0: /* disband successful. */
sch->vpm = 0;
ccw_device_disband_done(cdev, ret);
break;
case -EOPNOTSUPP:
/*
* One of those strange devices which claim to be able
* to do multipathing but not for Unset Path Group ID.
*/
cdev->private->flags.pgid_single = 1;
/* fall through. */
case -EAGAIN: /* Try again. */
__ccw_device_disband_start(cdev);
break;
case -ETIME: /* Set path group id stopped by timeout. */
ccw_device_disband_done(cdev, -ETIME);
break;
case -EACCES: /* channel is not operational. */
cdev->private->imask >>= 1;
cdev->private->iretry = 5;
__ccw_device_disband_start(cdev);
break;
}
}
void
ccw_device_disband_start(struct ccw_device *cdev)
{
cdev->private->flags.pgid_single = 0;
cdev->private->iretry = 5;
cdev->private->imask = 0x80;
__ccw_device_disband_start(cdev);
}

View File

@@ -0,0 +1,385 @@
/*
* drivers/s390/cio/device_status.c
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Author(s): Cornelia Huck(cohuck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Status accumulation and basic sense functions.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
/*
* Check for any kind of channel or interface control check but don't
* issue the message for the console device
*/
static inline void
ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
{
if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK |
SCHN_STAT_CHN_CTRL_CHK |
SCHN_STAT_INTF_CTRL_CHK)))
return;
CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
"received"
" ... device %04X on subchannel %04X, dev_stat "
": %02X sch_stat : %02X\n",
cdev->private->devno, cdev->private->irq,
cdev->private->irb.scsw.dstat,
cdev->private->irb.scsw.cstat);
if (irb->scsw.cc != 3) {
char dbf_text[15];
sprintf(dbf_text, "chk%x", cdev->private->irq);
CIO_TRACE_EVENT(0, dbf_text);
CIO_HEX_EVENT(0, &cdev->private->irb, sizeof (struct irb));
}
}
/*
* Some paths became not operational (pno bit in scsw is set).
*/
static void
ccw_device_path_notoper(struct ccw_device *cdev)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
stsch (sch->irq, &sch->schib);
CIO_MSG_EVENT(0, "%s(%04x) - path(s) %02x are "
"not operational \n", __FUNCTION__, sch->irq,
sch->schib.pmcw.pnom);
sch->lpm &= ~sch->schib.pmcw.pnom;
if (cdev->private->options.pgroup)
cdev->private->flags.doverify = 1;
}
/*
* Copy valid bits from the extended control word to device irb.
*/
static inline void
ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
{
/*
 * Copy the extended control bit if it is valid... yes, there
 * are conditions that have to be met for the extended control
 * bit to have meaning. Sick.
 */
cdev->private->irb.scsw.ectl = 0;
if ((irb->scsw.stctl & SCSW_STCTL_ALERT_STATUS) &&
!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS))
cdev->private->irb.scsw.ectl = irb->scsw.ectl;
/* Check if extended control word is valid. */
if (!cdev->private->irb.scsw.ectl)
return;
/* Copy concurrent sense / model dependent information. */
memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw));
}
/*
* Check if extended status word is valid.
*/
static inline int
ccw_device_accumulate_esw_valid(struct irb *irb)
{
if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND)
return 0;
if (irb->scsw.stctl ==
(SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
!(irb->scsw.actl & SCSW_ACTL_SUSPENDED))
return 0;
return 1;
}
/*
* Copy valid bits from the extended status word to device irb.
*/
static inline void
ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
{
struct irb *cdev_irb;
struct sublog *cdev_sublog, *sublog;
if (!ccw_device_accumulate_esw_valid(irb))
return;
cdev_irb = &cdev->private->irb;
/* Copy last path used mask. */
cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
/* Copy subchannel logout information if esw is of format 0. */
if (irb->scsw.eswf) {
cdev_sublog = &cdev_irb->esw.esw0.sublog;
sublog = &irb->esw.esw0.sublog;
/* Copy extended status flags. */
cdev_sublog->esf = sublog->esf;
/*
* Copy fields that have a meaning for channel data check
* channel control check and interface control check.
*/
if (irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK |
SCHN_STAT_CHN_CTRL_CHK |
SCHN_STAT_INTF_CTRL_CHK)) {
/* Copy ancillary report bit. */
cdev_sublog->arep = sublog->arep;
/* Copy field-validity-flags. */
cdev_sublog->fvf = sublog->fvf;
/* Copy storage access code. */
cdev_sublog->sacc = sublog->sacc;
/* Copy termination code. */
cdev_sublog->termc = sublog->termc;
/* Copy sequence code. */
cdev_sublog->seqc = sublog->seqc;
}
/* Copy device status check. */
cdev_sublog->devsc = sublog->devsc;
/* Copy secondary error. */
cdev_sublog->serr = sublog->serr;
/* Copy i/o-error alert. */
cdev_sublog->ioerr = sublog->ioerr;
/* Copy channel path timeout bit. */
if (irb->scsw.cstat & SCHN_STAT_INTF_CTRL_CHK)
cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
/* Copy failing storage address validity flag. */
cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
if (cdev_irb->esw.esw0.erw.fsavf) {
/* ... and copy the failing storage address. */
memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
sizeof (irb->esw.esw0.faddr));
/* ... and copy the failing storage address format. */
cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
}
/* Copy secondary ccw address validity bit. */
cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
if (irb->esw.esw0.erw.scavf)
/* ... and copy the secondary ccw address. */
cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;
}
/* FIXME: DCTI for format 2? */
/* Copy authorization bit. */
cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
/* Copy path verification required flag. */
cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
if (irb->esw.esw0.erw.pvrf && cdev->private->options.pgroup)
cdev->private->flags.doverify = 1;
/* Copy concurrent sense bit. */
cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
if (irb->esw.esw0.erw.cons)
cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
}
/*
* Accumulate status from irb to devstat.
*/
void
ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
{
struct irb *cdev_irb;
/*
 * Check if the status pending bit is set in stctl.
 * If not, the remaining bits have no meaning and we must ignore them.
 * The esw is not meaningful either...
 */
if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND))
return;
/* Check for channel checks and interface control checks. */
ccw_device_msg_control_check(cdev, irb);
/* Check for path not operational. */
if (irb->scsw.pno && irb->scsw.fctl != 0 &&
(!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
(irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
ccw_device_path_notoper(cdev);
/*
* Don't accumulate unsolicited interrupts.
*/
if ((irb->scsw.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
(!irb->scsw.cc))
return;
cdev_irb = &cdev->private->irb;
/* Copy bits which are valid only for the start function. */
if (irb->scsw.fctl & SCSW_FCTL_START_FUNC) {
/* Copy key. */
cdev_irb->scsw.key = irb->scsw.key;
/* Copy suspend control bit. */
cdev_irb->scsw.sctl = irb->scsw.sctl;
/* Accumulate deferred condition code. */
cdev_irb->scsw.cc |= irb->scsw.cc;
/* Copy ccw format bit. */
cdev_irb->scsw.fmt = irb->scsw.fmt;
/* Copy prefetch bit. */
cdev_irb->scsw.pfch = irb->scsw.pfch;
/* Copy initial-status-interruption-control. */
cdev_irb->scsw.isic = irb->scsw.isic;
/* Copy address limit checking control. */
cdev_irb->scsw.alcc = irb->scsw.alcc;
/* Copy suppress suspend bit. */
cdev_irb->scsw.ssi = irb->scsw.ssi;
}
/* Take care of the extended control bit and extended control word. */
ccw_device_accumulate_ecw(cdev, irb);
/* Accumulate function control. */
cdev_irb->scsw.fctl |= irb->scsw.fctl;
/* Copy activity control. */
cdev_irb->scsw.actl = irb->scsw.actl;
/* Accumulate status control. */
cdev_irb->scsw.stctl |= irb->scsw.stctl;
/*
* Copy ccw address if it is valid. This is a bit simplified
* but should be close enough for all practical purposes.
*/
if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) ||
((irb->scsw.stctl ==
(SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
(irb->scsw.actl & SCSW_ACTL_DEVACT) &&
(irb->scsw.actl & SCSW_ACTL_SCHACT)) ||
(irb->scsw.actl & SCSW_ACTL_SUSPENDED))
cdev_irb->scsw.cpa = irb->scsw.cpa;
/* Accumulate device status, but not the device busy flag. */
cdev_irb->scsw.dstat &= ~DEV_STAT_BUSY;
cdev_irb->scsw.dstat |= irb->scsw.dstat;
/* Accumulate subchannel status. */
cdev_irb->scsw.cstat |= irb->scsw.cstat;
/* Copy residual count if it is valid. */
if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
(irb->scsw.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) == 0)
cdev_irb->scsw.count = irb->scsw.count;
/* Take care of bits in the extended status word. */
ccw_device_accumulate_esw(cdev, irb);
/*
* Check whether we must issue a SENSE CCW ourselves if there is no
* concurrent sense facility installed for the subchannel.
* No sense is required if no delayed sense is pending
* and we did not get a unit check without sense information.
*
* Note: We should check for ioinfo[irq]->flags.consns but VM
* violates the ESA/390 architecture and doesn't present an
* operand exception for virtual devices without concurrent
* sense facility available/supported when enabling the
* concurrent sense facility.
*/
if ((cdev_irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
!(cdev_irb->esw.esw0.erw.cons))
cdev->private->flags.dosense = 1;
}
/*
* Do a basic sense.
*/
int
ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
/* A sense is required, can we do it now ? */
if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
/*
 * We received a unit check but have no final
 * status yet, therefore we must delay the SENSE
 * processing. We must not report this intermediate
 * status to the device interrupt handler.
 */
return -EBUSY;
/*
* We have ending status but no sense information. Do a basic sense.
*/
sch->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE;
sch->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw);
sch->sense_ccw.count = SENSE_MAX_COUNT;
sch->sense_ccw.flags = CCW_FLAG_SLI;
return cio_start (sch, &sch->sense_ccw, 0xff);
}
/*
* Add information from basic sense to devstat.
*/
void
ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
{
/*
 * Check if the status pending bit is set in stctl.
 * If not, the remaining bits have no meaning and we must ignore them.
 * The esw is not meaningful either...
 */
if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND))
return;
/* Check for channel checks and interface control checks. */
ccw_device_msg_control_check(cdev, irb);
/* Check for path not operational. */
if (irb->scsw.pno && irb->scsw.fctl != 0 &&
(!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
(irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
ccw_device_path_notoper(cdev);
if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
(irb->scsw.dstat & DEV_STAT_CHN_END)) {
cdev->private->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
}
/* Check if path verification is required. */
if (ccw_device_accumulate_esw_valid(irb) &&
irb->esw.esw0.erw.pvrf && cdev->private->options.pgroup)
cdev->private->flags.doverify = 1;
}
/*
* This function accumulates the status into the private devstat and
* starts a basic sense if one is needed.
*/
int
ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
{
ccw_device_accumulate_irb(cdev, irb);
if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
return -EBUSY;
/* Check for basic sense. */
if (cdev->private->flags.dosense &&
!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) {
cdev->private->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
return 0;
}
if (cdev->private->flags.dosense) {
ccw_device_do_sense(cdev, irb);
return -EBUSY;
}
return 0;
}

View File

@@ -0,0 +1,228 @@
#ifndef S390_CIO_IOASM_H
#define S390_CIO_IOASM_H
/*
* TPI info structure
*/
struct tpi_info {
__u32 reserved1 : 16; /* reserved 0x00000001 */
__u32 irq : 16; /* aka. subchannel number */
__u32 intparm; /* interruption parameter */
__u32 adapter_IO : 1;
__u32 reserved2 : 1;
__u32 isc : 3;
__u32 reserved3 : 12;
__u32 int_type : 3;
__u32 reserved4 : 12;
} __attribute__ ((packed));
/*
* Some S390 specific IO instructions as inline
*/
extern __inline__ int stsch(int irq, volatile struct schib *addr)
{
int ccode;
__asm__ __volatile__(
" lr 1,%1\n"
" stsch 0(%2)\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
: "d" (irq | 0x10000), "a" (addr)
: "cc", "1" );
return ccode;
}
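/*
 * Note: the subchannel number is or'ed with 0x10000 before being loaded
 * into register 1; this sets the bit that makes it a valid subsystem
 * identification word for the I/O instructions in this file.
 */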
extern __inline__ int msch(int irq, volatile struct schib *addr)
{
int ccode;
__asm__ __volatile__(
" lr 1,%1\n"
" msch 0(%2)\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
: "d" (irq | 0x10000L), "a" (addr)
: "cc", "1" );
return ccode;
}
extern __inline__ int msch_err(int irq, volatile struct schib *addr)
{
int ccode;
__asm__ __volatile__(
" lhi %0,%3\n"
" lr 1,%1\n"
" msch 0(%2)\n"
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
#ifdef CONFIG_ARCH_S390X
".section __ex_table,\"a\"\n"
" .align 8\n"
" .quad 0b,1b\n"
".previous"
#else
".section __ex_table,\"a\"\n"
" .align 4\n"
" .long 0b,1b\n"
".previous"
#endif
: "=&d" (ccode)
: "d" (irq | 0x10000L), "a" (addr), "K" (-EIO)
: "cc", "1" );
return ccode;
}
extern __inline__ int tsch(int irq, volatile struct irb *addr)
{
int ccode;
__asm__ __volatile__(
" lr 1,%1\n"
" tsch 0(%2)\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
: "d" (irq | 0x10000L), "a" (addr)
: "cc", "1" );
return ccode;
}
extern __inline__ int tpi( volatile struct tpi_info *addr)
{
int ccode;
__asm__ __volatile__(
" tpi 0(%1)\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
: "a" (addr)
: "cc", "1" );
return ccode;
}
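/*
 * Usage sketch: on an I/O interruption, tpi() stores the subchannel
 * number and interruption parameter into a struct tpi_info, and tsch()
 * is then issued against that subchannel to collect the IRB.
 */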
extern __inline__ int ssch(int irq, volatile struct orb *addr)
{
int ccode;
__asm__ __volatile__(
" lr 1,%1\n"
" ssch 0(%2)\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
: "d" (irq | 0x10000L), "a" (addr)
: "cc", "1" );
return ccode;
}
extern __inline__ int rsch(int irq)
{
int ccode;
__asm__ __volatile__(
" lr 1,%1\n"
" rsch\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
: "d" (irq | 0x10000L)
: "cc", "1" );
return ccode;
}
extern __inline__ int csch(int irq)
{
int ccode;
__asm__ __volatile__(
" lr 1,%1\n"
" csch\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
: "d" (irq | 0x10000L)
: "cc", "1" );
return ccode;
}
extern __inline__ int hsch(int irq)
{
int ccode;
__asm__ __volatile__(
" lr 1,%1\n"
" hsch\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
: "d" (irq | 0x10000L)
: "cc", "1" );
return ccode;
}
extern __inline__ int xsch(int irq)
{
int ccode;
__asm__ __volatile__(
" lr 1,%1\n"
" .insn rre,0xb2760000,%1,0\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
: "d" (irq | 0x10000L)
: "cc", "1" );
return ccode;
}
extern __inline__ int chsc(void *chsc_area)
{
int cc;
__asm__ __volatile__ (
".insn rre,0xb25f0000,%1,0 \n\t"
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc)
: "d" (chsc_area)
: "cc" );
return cc;
}
extern __inline__ int iac( void)
{
int ccode;
__asm__ __volatile__(
" iac 1\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode) : : "cc", "1" );
return ccode;
}
extern __inline__ int rchp(int chpid)
{
int ccode;
__asm__ __volatile__(
" lr 1,%1\n"
" rchp\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
: "d" (chpid)
: "cc", "1" );
return ccode;
}
#endif

File diff suppressed because it is too large

View File

@@ -0,0 +1,648 @@
#ifndef _CIO_QDIO_H
#define _CIO_QDIO_H
#define VERSION_CIO_QDIO_H "$Revision: 1.26 $"
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_VERBOSE_LEVEL 9
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_VERBOSE_LEVEL 5
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_USE_PROCESSING_STATE
#ifdef CONFIG_QDIO_PERF_STATS
#define QDIO_PERFORMANCE_STATS
#endif /* CONFIG_QDIO_PERF_STATS */
#define QDIO_MINIMAL_BH_RELIEF_TIME 16
#define QDIO_TIMER_POLL_VALUE 1
#define IQDIO_TIMER_POLL_VALUE 1
/*
 * Unfortunately this can't be (QDIO_MAX_BUFFERS_PER_Q*4/3) or so, as
 * we never know whether we'll get initiative again, e.g. to give the
 * transmit skb's back to the stack; the stack may be waiting for
 * them... therefore we define 4 as the threshold to start polling
 * (which will stop as soon as the asynchronous queue catches up).
 * Btw, this only applies to the asynchronous HiperSockets queue.
 */
#define IQDIO_FILL_LEVEL_TO_POLL 4
#define TIQDIO_THININT_ISC 3
#define TIQDIO_DELAY_TARGET 0
#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */
#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */
#define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used, as we */
#define IQDIO_GLOBAL_LAPS_INT 1 /* don't use the global summary */
#define IQDIO_LOCAL_LAPS 4
#define IQDIO_LOCAL_LAPS_INT 1
#define IQDIO_GLOBAL_SUMMARY_CC_MASK 2
/*#define IQDIO_IQDC_INT_PARM 0x1234*/
#define QDIO_Q_LAPS 5
#define QDIO_STORAGE_KEY 0
#define L2_CACHELINE_SIZE 256
#define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE/sizeof(__u32))
#define QDIO_PERF "qdio_perf"
/* must be a power of 2 */
/*#define QDIO_STATS_NUMBER 4
#define QDIO_STATS_CLASSES 2
#define QDIO_STATS_COUNT_NEEDED 2*/
#define QDIO_NO_USE_COUNT_TIMEOUT (1*HZ) /* wait for 1 sec on each q before
exiting without the queue's use_count
having dropped to 0 */
#define QDIO_ESTABLISH_TIMEOUT (1*HZ)
#define QDIO_ACTIVATE_TIMEOUT ((5*HZ)>>10)
#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ)
#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ)
enum qdio_irq_states {
QDIO_IRQ_STATE_INACTIVE,
QDIO_IRQ_STATE_ESTABLISHED,
QDIO_IRQ_STATE_ACTIVE,
QDIO_IRQ_STATE_STOPPED,
QDIO_IRQ_STATE_CLEANUP,
QDIO_IRQ_STATE_ERR,
NR_QDIO_IRQ_STATES,
};
/* used as intparm in do_IO: */
#define QDIO_DOING_SENSEID 0
#define QDIO_DOING_ESTABLISH 1
#define QDIO_DOING_ACTIVATE 2
#define QDIO_DOING_CLEANUP 3
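/*
 * Hypothetical sketch, not part of the original header: the values
 * above are handed to do_IO as intparm, so the interrupt handler can
 * tell which setup phase a completed channel program belongs to. The
 * state transitions shown here are an assumption for illustration.
 */
#if 0
static enum qdio_irq_states example_phase_to_state(unsigned long intparm)
{
	switch (intparm) {
	case QDIO_DOING_ESTABLISH:
		return QDIO_IRQ_STATE_ESTABLISHED;
	case QDIO_DOING_ACTIVATE:
		return QDIO_IRQ_STATE_ACTIVE;
	case QDIO_DOING_CLEANUP:
		return QDIO_IRQ_STATE_INACTIVE;
	default:
		return QDIO_IRQ_STATE_ERR;
	}
}
#endif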
/************************* DEBUG FACILITY STUFF *********************/
#define QDIO_DBF_HEX(ex,name,level,addr,len) \
do { \
if (ex) \
debug_exception(qdio_dbf_##name,level,(void*)(addr),len); \
else \
debug_event(qdio_dbf_##name,level,(void*)(addr),len); \
} while (0)
#define QDIO_DBF_TEXT(ex,name,level,text) \
do { \
if (ex) \
debug_text_exception(qdio_dbf_##name,level,text); \
else \
debug_text_event(qdio_dbf_##name,level,text); \
} while (0)
#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len)
#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len)
#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len)
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len)
#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len)
#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len)
#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len)
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0)
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text)
#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text)
#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text)
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text)
#define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text)
#define QDIO_DBF_TEXT5(ex,name,text) QDIO_DBF_TEXT(ex,name,5,text)
#define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text)
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TEXT3(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT4(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT5(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT6(ex,name,text) do {} while (0)
#endif /* CONFIG_QDIO_DEBUG */
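/*
 * Usage sketch, added for illustration: "ex" flags the entry as an
 * exception, "name" selects one of the debug areas owned by qdio.c
 * (e.g. qdio_dbf_setup, qdio_dbf_trace), and the numeric suffix is the
 * debug level. Text entries must fit the area's record length.
 */
#if 0
static void example_trace(int cc)
{
	char dbf_text[15];

	QDIO_DBF_TEXT2(0, setup, "example");	/* plain event, level 2 */
	sprintf(dbf_text, "cc:%d", cc);
	QDIO_DBF_TEXT1(1, trace, dbf_text);	/* exception, level 1 */
}
#endif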
#define QDIO_DBF_SETUP_NAME "qdio_setup"
#define QDIO_DBF_SETUP_LEN 8
#define QDIO_DBF_SETUP_INDEX 2
#define QDIO_DBF_SETUP_NR_AREAS 1
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_SETUP_LEVEL 6
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SETUP_LEVEL 2
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */
#define QDIO_DBF_SBAL_LEN 256
#define QDIO_DBF_SBAL_INDEX 2
#define QDIO_DBF_SBAL_NR_AREAS 2
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_SBAL_LEVEL 6
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SBAL_LEVEL 2
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TRACE_NAME "qdio_trace"
#define QDIO_DBF_TRACE_LEN 8
#define QDIO_DBF_TRACE_NR_AREAS 2
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_TRACE_INDEX 4
#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TRACE_INDEX 2
#define QDIO_DBF_TRACE_LEVEL 2
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SENSE_NAME "qdio_sense"
#define QDIO_DBF_SENSE_LEN 64
#define QDIO_DBF_SENSE_INDEX 1
#define QDIO_DBF_SENSE_NR_AREAS 1
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_SENSE_LEVEL 6
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SENSE_LEVEL 2
#endif /* CONFIG_QDIO_DEBUG */
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT
#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out"
#define QDIO_DBF_SLSB_OUT_LEN QDIO_MAX_BUFFERS_PER_Q
#define QDIO_DBF_SLSB_OUT_INDEX 8
#define QDIO_DBF_SLSB_OUT_NR_AREAS 1
#define QDIO_DBF_SLSB_OUT_LEVEL 6
#define QDIO_DBF_SLSB_IN_NAME "qdio_slsb_in"
#define QDIO_DBF_SLSB_IN_LEN QDIO_MAX_BUFFERS_PER_Q
#define QDIO_DBF_SLSB_IN_INDEX 8
#define QDIO_DBF_SLSB_IN_NR_AREAS 1
#define QDIO_DBF_SLSB_IN_LEVEL 6
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_PRINTK_HEADER QDIO_NAME ": "
#if QDIO_VERBOSE_LEVEL>8
#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_STUPID(x...)
#endif
#if QDIO_VERBOSE_LEVEL>7
#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_ALL(x...)
#endif
#if QDIO_VERBOSE_LEVEL>6
#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_INFO(x...)
#endif
#if QDIO_VERBOSE_LEVEL>5
#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_WARN(x...)
#endif
#if QDIO_VERBOSE_LEVEL>4
#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_ERR(x...)
#endif
#if QDIO_VERBOSE_LEVEL>3
#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_CRIT(x...)
#endif
#if QDIO_VERBOSE_LEVEL>2
#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_ALERT(x...)
#endif
#if QDIO_VERBOSE_LEVEL>1
#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_EMERG(x...)
#endif
#define HEXDUMP16(importance,header,ptr) \
QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \
"%02x %02x %02x %02x %02x %02x %02x %02x " \
"%02x %02x %02x %02x\n",*(((char*)ptr)), \
*(((char*)ptr)+1),*(((char*)ptr)+2), \
*(((char*)ptr)+3),*(((char*)ptr)+4), \
*(((char*)ptr)+5),*(((char*)ptr)+6), \
*(((char*)ptr)+7),*(((char*)ptr)+8), \
*(((char*)ptr)+9),*(((char*)ptr)+10), \
*(((char*)ptr)+11),*(((char*)ptr)+12), \
*(((char*)ptr)+13),*(((char*)ptr)+14), \
*(((char*)ptr)+15)); \
QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
"%02x %02x %02x %02x %02x %02x %02x %02x\n", \
*(((char*)ptr)+16),*(((char*)ptr)+17), \
*(((char*)ptr)+18),*(((char*)ptr)+19), \
*(((char*)ptr)+20),*(((char*)ptr)+21), \
*(((char*)ptr)+22),*(((char*)ptr)+23), \
*(((char*)ptr)+24),*(((char*)ptr)+25), \
*(((char*)ptr)+26),*(((char*)ptr)+27), \
*(((char*)ptr)+28),*(((char*)ptr)+29), \
*(((char*)ptr)+30),*(((char*)ptr)+31));
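/*
 * Usage sketch, illustrative only: the "importance" argument picks one
 * of the QDIO_PRINT_* levels defined above, and ptr must point at a
 * buffer of at least 32 dumpable bytes.
 */
#if 0
static void example_dump_sense(unsigned char *sense)
{
	QDIO_PRINT_WARN("device reported a unit check\n");
	HEXDUMP16(WARN, "sense data: ", sense);
}
#endif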
/****************** END OF DEBUG FACILITY STUFF *********************/
/*
* Some instructions as assembly
*/
extern __inline__ int
do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2)
{
int cc;
#ifndef CONFIG_ARCH_S390X
asm volatile (
"lhi 0,2 \n\t"
"lr 1,%1 \n\t"
"lr 2,%2 \n\t"
"lr 3,%3 \n\t"
"siga 0 \n\t"
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc)
: "d" (0x10000|irq), "d" (mask1), "d" (mask2)
: "cc", "0", "1", "2", "3"
);
#else /* CONFIG_ARCH_S390X */
asm volatile (
"lghi 0,2 \n\t"
"llgfr 1,%1 \n\t"
"llgfr 2,%2 \n\t"
"llgfr 3,%3 \n\t"
"siga 0 \n\t"
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc)
: "d" (0x10000|irq), "d" (mask1), "d" (mask2)
: "cc", "0", "1", "2", "3"
);
#endif /* CONFIG_ARCH_S390X */
return cc;
}
extern __inline__ int
do_siga_input(unsigned int irq, unsigned int mask)
{
int cc;
#ifndef CONFIG_ARCH_S390X
asm volatile (
"lhi 0,1 \n\t"
"lr 1,%1 \n\t"
"lr 2,%2 \n\t"
"siga 0 \n\t"
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc)
: "d" (0x10000|irq), "d" (mask)
: "cc", "0", "1", "2", "memory"
);
#else /* CONFIG_ARCH_S390X */
asm volatile (
"lghi 0,1 \n\t"
"llgfr 1,%1 \n\t"
"llgfr 2,%2 \n\t"
"siga 0 \n\t"
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc)
: "d" (0x10000|irq), "d" (mask)
: "cc", "0", "1", "2", "memory"
);
#endif /* CONFIG_ARCH_S390X */
return cc;
}
extern __inline__ int
do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb)
{
int cc;
__u32 busy_bit;
#ifndef CONFIG_ARCH_S390X
asm volatile (
"lhi 0,0 \n\t"
"lr 1,%2 \n\t"
"lr 2,%3 \n\t"
"siga 0 \n\t"
"0:"
"ipm %0 \n\t"
"srl %0,28 \n\t"
"srl 0,31 \n\t"
"lr %1,0 \n\t"
"1: \n\t"
".section .fixup,\"ax\"\n\t"
"2: \n\t"
"lhi %0,%4 \n\t"
"bras 1,3f \n\t"
".long 1b \n\t"
"3: \n\t"
"l 1,0(1) \n\t"
"br 1 \n\t"
".previous \n\t"
".section __ex_table,\"a\"\n\t"
".align 4 \n\t"
".long 0b,2b \n\t"
".previous \n\t"
: "=d" (cc), "=d" (busy_bit)
: "d" (0x10000|irq), "d" (mask),
"i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
: "cc", "0", "1", "2", "memory"
);
#else /* CONFIG_ARCH_S390X */
asm volatile (
"lghi 0,0 \n\t"
"llgfr 1,%2 \n\t"
"llgfr 2,%3 \n\t"
"siga 0 \n\t"
"0:"
"ipm %0 \n\t"
"srl %0,28 \n\t"
"srl 0,31 \n\t"
"llgfr %1,0 \n\t"
"1: \n\t"
".section .fixup,\"ax\"\n\t"
"lghi %0,%4 \n\t"
"jg 1b \n\t"
".previous\n\t"
".section __ex_table,\"a\"\n\t"
".align 8 \n\t"
".quad 0b,1b \n\t"
".previous \n\t"
: "=d" (cc), "=d" (busy_bit)
: "d" (0x10000|irq), "d" (mask),
"i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
: "cc", "0", "1", "2", "memory"
);
#endif /* CONFIG_ARCH_S390X */
(*bb) = busy_bit;
return cc;
}
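/*
 * Sketch, not part of the original header, of how a caller would treat
 * the busy bit delivered by do_siga_output: cc 2 with the busy bit set
 * means the device was busy and the SIGA should be retried, bounded by
 * QDIO_BUSY_BIT_PATIENCE per attempt and QDIO_BUSY_BIT_GIVE_UP in
 * total (both in microseconds). udelay() is assumed available.
 */
#if 0
static int example_siga_output_retry(unsigned long irq, unsigned long mask)
{
	__u32 busy_bit;
	int cc, waited = 0;

	for (;;) {
		cc = do_siga_output(irq, mask, &busy_bit);
		if (!(cc == 2 && busy_bit))
			return cc;	/* done, or a hard error */
		if (waited > QDIO_BUSY_BIT_GIVE_UP)
			return cc;	/* device stayed busy, give up */
		udelay(QDIO_BUSY_BIT_PATIENCE);
		waited += QDIO_BUSY_BIT_PATIENCE;
	}
}
#endif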
extern __inline__ unsigned long
do_clear_global_summary(void)
{
unsigned long time;
#ifndef CONFIG_ARCH_S390X
asm volatile (
"lhi 1,3 \n\t"
".insn rre,0xb2650000,2,0 \n\t"
"lr %0,3 \n\t"
: "=d" (time) : : "cc", "1", "2", "3"
);
#else /* CONFIG_ARCH_S390X */
asm volatile (
"lghi 1,3 \n\t"
".insn rre,0xb2650000,2,0 \n\t"
"lgr %0,3 \n\t"
: "=d" (time) : : "cc", "1", "2", "3"
);
#endif /* CONFIG_ARCH_S390X */
return time;
}
/*
* QDIO device commands returned by extended Sense-ID
*/
#define DEFAULT_ESTABLISH_QS_CMD 0x1b
#define DEFAULT_ESTABLISH_QS_COUNT 0x1000
#define DEFAULT_ACTIVATE_QS_CMD 0x1f
#define DEFAULT_ACTIVATE_QS_COUNT 0
/*
* additional CIWs returned by extended Sense-ID
*/
#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
#define QDIO_CHSC_RESPONSE_CODE_OK 1
/* flags returned by the CHSC "store subchannel QDIO data" command */
#define CHSC_FLAG_QDIO_CAPABILITY 0x80
#define CHSC_FLAG_VALIDITY 0x40
#define CHSC_FLAG_SIGA_INPUT_NECESSARY 0x40
#define CHSC_FLAG_SIGA_OUTPUT_NECESSARY 0x20
#define CHSC_FLAG_SIGA_SYNC_NECESSARY 0x10
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04
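/*
 * Illustrative sketch, not in the original source: the qdioac flags
 * above describe which SIGA variants a subchannel actually needs, and
 * the per-queue siga_in/siga_out/siga_sync switches of struct qdio_q
 * (defined below) would be derived from them roughly like this.
 */
#if 0
static void example_interpret_qdioac(struct qdio_q *q, unsigned char qdioac)
{
	q->siga_in   = (qdioac & CHSC_FLAG_SIGA_INPUT_NECESSARY)  ? 1 : 0;
	q->siga_out  = (qdioac & CHSC_FLAG_SIGA_OUTPUT_NECESSARY) ? 1 : 0;
	q->siga_sync = (qdioac & CHSC_FLAG_SIGA_SYNC_NECESSARY)   ? 1 : 0;
}
#endif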
#ifdef QDIO_PERFORMANCE_STATS
struct qdio_perf_stats {
unsigned int tl_runs;
unsigned int siga_outs;
unsigned int siga_ins;
unsigned int siga_syncs;
unsigned int pcis;
unsigned int thinints;
unsigned int fast_reqs;
__u64 start_time_outbound;
unsigned int outbound_cnt;
unsigned int outbound_time;
__u64 start_time_inbound;
unsigned int inbound_cnt;
unsigned int inbound_time;
};
#endif /* QDIO_PERFORMANCE_STATS */
#define atomic_swap(a,b) xchg((int*)a.counter,b)
/* unlikely, as the later the sync happens the better */
#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q)
#define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \
qdio_siga_sync(q,~0U,~0U)
#define SYNC_MEMORY_ALL_OUTB if (unlikely(q->siga_sync)) \
qdio_siga_sync(q,~0U,0)
#define NOW qdio_get_micros()
#define SAVE_TIMESTAMP(q) q->timing.last_transfer_time=NOW
#define GET_SAVED_TIMESTAMP(q) (q->timing.last_transfer_time)
#define SAVE_FRONTIER(q,val) q->last_move_ftc=val
#define GET_SAVED_FRONTIER(q) (q->last_move_ftc)
#define MY_MODULE_STRING(x) #x
#ifdef CONFIG_ARCH_S390X
#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x)
#else /* CONFIG_ARCH_S390X */
#define QDIO_GET_ADDR(x) ((__u32)(long)x)
#endif /* CONFIG_ARCH_S390X */
#ifdef CONFIG_QDIO_DEBUG
#define set_slsb(x,y) \
if(q->queue_type==QDIO_TRACE_QTYPE) { \
if(q->is_input_q) { \
QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
} else { \
QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
} \
} \
qdio_set_slsb(x,y); \
if(q->queue_type==QDIO_TRACE_QTYPE) { \
if(q->is_input_q) { \
QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
} else { \
QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
} \
}
#else /* CONFIG_QDIO_DEBUG */
#define set_slsb(x,y) qdio_set_slsb(x,y)
#endif /* CONFIG_QDIO_DEBUG */
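/*
 * Usage sketch, illustrative only: set_slsb() takes a pointer into the
 * queue's SLSB and the new state byte (an SLSB_* constant from
 * asm/qdio.h); the acc.val indexing follows the call sites in qdio.c.
 * Note that the CONFIG_QDIO_DEBUG variant above also relies on a local
 * variable q being in scope at the call site.
 */
#if 0
static void example_mark_buffer(struct qdio_q *q, int bufno)
{
	set_slsb(&q->slsb.acc.val[bufno], SLSB_P_INPUT_NOT_INIT);
}
#endif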
struct qdio_q {
volatile struct slsb slsb;
char unused[QDIO_MAX_BUFFERS_PER_Q];
__u32 * volatile dev_st_chg_ind;
int is_input_q;
int irq;
struct ccw_device *cdev;
unsigned int is_iqdio_q;
unsigned int is_thinint_q;
/* bit 0 means queue 0, bit 1 means queue 1, ... */
unsigned int mask;
unsigned int q_no;
qdio_handler_t (*handler);
/* points to the next buffer to be checked for having
* been processed by the card (outbound)
* or to the next buffer the program should check for (inbound) */
volatile int first_to_check;
/* and the last time it was: */
volatile int last_move_ftc;
atomic_t number_of_buffers_used;
atomic_t polling;
unsigned int siga_in;
unsigned int siga_out;
unsigned int siga_sync;
unsigned int siga_sync_done_on_thinints;
unsigned int siga_sync_done_on_outb_tis;
unsigned int hydra_gives_outbound_pcis;
/* used to save beginning position when calling dd_handlers */
int first_element_to_kick;
atomic_t use_count;
atomic_t is_in_shutdown;
void *irq_ptr;
#ifdef QDIO_USE_TIMERS_FOR_POLLING
struct timer_list timer;
atomic_t timer_already_set;
spinlock_t timer_lock;
#else /* QDIO_USE_TIMERS_FOR_POLLING */
struct tasklet_struct tasklet;
#endif /* QDIO_USE_TIMERS_FOR_POLLING */
enum qdio_irq_states state;
/* used to store the error condition during a data transfer */
unsigned int qdio_error;
unsigned int siga_error;
unsigned int error_status_flags;
/* list of interesting queues */
volatile struct qdio_q *list_next;
volatile struct qdio_q *list_prev;
struct sl *sl;
volatile struct sbal *sbal[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_buffer *qdio_buffers[QDIO_MAX_BUFFERS_PER_Q];
unsigned long int_parm;
/*struct {
int in_bh_check_limit;
int threshold;
} threshold_classes[QDIO_STATS_CLASSES];*/
struct {
/* inbound: the time to stop polling
outbound: the time to kick peer */
int threshold; /* the real value */
/* outbound: last time of do_QDIO
inbound: last time of noticing incoming data */
/*__u64 last_transfer_times[QDIO_STATS_NUMBER];
int last_transfer_index; */
__u64 last_transfer_time;
__u64 busy_start;
} timing;
atomic_t busy_siga_counter;
unsigned int queue_type;
/* leave this member at the end. won't be cleared in qdio_fill_qs */
struct slib *slib; /* a page is allocated under this pointer,
sl points into this page, offset PAGE_SIZE/2
(after slib) */
} __attribute__ ((aligned(256)));
struct qdio_irq {
__u32 * volatile dev_st_chg_ind;
unsigned long int_parm;
int irq;
unsigned int is_iqdio_irq;
unsigned int is_thinint_irq;
unsigned int hydra_gives_outbound_pcis;
unsigned int sync_done_on_outb_pcis;
enum qdio_irq_states state;
unsigned int no_input_qs;
unsigned int no_output_qs;
unsigned char qdioac;
struct ccw1 ccw;
struct ciw equeue;
struct ciw aqueue;
struct qib qib;
void (*original_int_handler) (struct ccw_device *,
unsigned long, struct irb *);
/* leave these four members together at the end. won't be cleared in qdio_fill_irq */
struct qdr *qdr;
struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
struct semaphore setting_up_sema;
};
#endif