diff -Nru a/arch/ia64/Kconfig b/arch/ia64/Kconfig
--- a/arch/ia64/Kconfig Tue Apr 13 17:34:24 2004
+++ b/arch/ia64/Kconfig Tue Apr 13 17:34:24 2004
@@ -16,7 +16,7 @@
The Itanium Processor Family is Intel's 64-bit successor to
the 32-bit X86 line. The IA-64 Linux project has a home
page at <http://www.linuxia64.org/> and a mailing list at
- linux-ia64@vger.kernel.org.
+ <linux-ia64@vger.kernel.org>.
config 64BIT
bool
@@ -57,7 +57,7 @@
DIG-compliant For DIG ("Developer's Interface Guide") compliant systems
HP-zx1/sx1000 For HP systems
SGI-SN2 For SGI Altix systems
- Ski-simulator For the HP simulator (<http://www.hpl.hp.com/research/linux/ski/>)
+ Ski-simulator For the HP simulator
If you don't know what to do, choose "generic".
@@ -443,6 +443,33 @@
send a BREAK and then within 5 seconds a command keypress. The
keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
unless you really know what this hack does.
+
+config IA64_EARLY_PRINTK
+ bool "Early printk support"
+ depends on DEBUG_KERNEL && !IA64_GENERIC
+ help
+ Selecting this option uses the VGA screen or serial console for
+ printk() output before the consoles are initialised. It is useful
+ for debugging problems early in the boot process, but only if you
+ have a suitable VGA/serial console attached. If you're unsure,
+ select N.
+
+config IA64_EARLY_PRINTK_UART
+ bool "Early printk on MMIO serial port"
+ depends on IA64_EARLY_PRINTK
+
+config IA64_EARLY_PRINTK_UART_BASE
+ hex "UART MMIO base address"
+ depends on IA64_EARLY_PRINTK_UART
+ default "ff5e0000"
+
+config IA64_EARLY_PRINTK_VGA
+ bool "Early printk on VGA"
+ depends on IA64_EARLY_PRINTK
+
+config IA64_EARLY_PRINTK_SGI_SN
+ bool "Early printk on SGI SN serial console"
+ depends on IA64_EARLY_PRINTK && (IA64_GENERIC || IA64_SGI_SN2)
config DEBUG_SLAB
bool "Debug memory allocations"
diff -Nru a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
--- a/arch/ia64/hp/common/sba_iommu.c Tue Apr 13 17:34:23 2004
+++ b/arch/ia64/hp/common/sba_iommu.c Tue Apr 13 17:34:23 2004
@@ -1732,7 +1732,6 @@
if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
ia64_max_iommu_merge_mask = ~iovp_mask;
- MAX_DMA_ADDRESS = ~0UL;
printk(KERN_INFO PFX
"%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
@@ -1965,6 +1964,18 @@
}
subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
+
+extern void dig_setup(char**);
+/*
+ * MAX_DMA_ADDRESS needs to be setup prior to paging_init to do any good,
+ * so we use the platform_setup hook to fix it up.
+ */
+void __init
+sba_setup(char **cmdline_p)
+{
+ MAX_DMA_ADDRESS = ~0UL;
+ dig_setup(cmdline_p);
+}
static int __init
nosbagart(char *str)
diff -Nru a/arch/ia64/hp/sim/Kconfig b/arch/ia64/hp/sim/Kconfig
--- a/arch/ia64/hp/sim/Kconfig Tue Apr 13 17:34:23 2004
+++ b/arch/ia64/hp/sim/Kconfig Tue Apr 13 17:34:23 2004
@@ -13,7 +13,7 @@
depends on HP_SIMSERIAL
config HP_SIMSCSI
- bool "Simulated SCSI disk"
+ tristate "Simulated SCSI disk"
depends on SCSI
endmenu
diff -Nru a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
--- a/arch/ia64/kernel/acpi.c Tue Apr 13 17:34:23 2004
+++ b/arch/ia64/kernel/acpi.c Tue Apr 13 17:34:23 2004
@@ -455,6 +455,7 @@
for (i = 0; i < MAX_PXM_DOMAINS; i++) {
if (pxm_bit_test(i)) {
pxm_to_nid_map[i] = numnodes;
+ node_set_online(numnodes);
nid_to_pxm_map[numnodes++] = i;
}
}
@@ -506,6 +507,13 @@
}
#endif /* CONFIG_ACPI_NUMA */
+unsigned int
+acpi_register_gsi (u32 gsi, int polarity, int trigger)
+{
+ return acpi_register_irq(gsi, polarity, trigger);
+}
+EXPORT_SYMBOL(acpi_register_gsi);
+
static int __init
acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
{
@@ -527,7 +535,7 @@
if (fadt->iapc_boot_arch & BAF_LEGACY_DEVICES)
acpi_legacy_devices = 1;
- acpi_register_irq(fadt->sci_int, ACPI_ACTIVE_LOW, ACPI_LEVEL_SENSITIVE);
+ acpi_register_gsi(fadt->sci_int, ACPI_ACTIVE_LOW, ACPI_LEVEL_SENSITIVE);
return 0;
}
diff -Nru a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
--- a/arch/ia64/kernel/efi.c Tue Apr 13 17:34:23 2004
+++ b/arch/ia64/kernel/efi.c Tue Apr 13 17:34:23 2004
@@ -674,8 +674,7 @@
for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
md = p;
if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
- /* paranoia attribute checking */
- if (md->attribute == (EFI_MEMORY_UC | EFI_MEMORY_RUNTIME))
+ if (md->attribute & EFI_MEMORY_UC)
return md->phys_addr;
}
}
diff -Nru a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
--- a/arch/ia64/kernel/iosapic.c Tue Apr 13 17:34:23 2004
+++ b/arch/ia64/kernel/iosapic.c Tue Apr 13 17:34:23 2004
@@ -172,7 +172,7 @@
static void
set_rte (unsigned int vector, unsigned int dest, int mask)
{
- unsigned long pol, trigger, dmode;
+ unsigned long pol, trigger, dmode, flags;
u32 low32, high32;
char *addr;
int rte_index;
@@ -211,11 +211,15 @@
/* dest contains both id and eid */
high32 = (dest << IOSAPIC_DEST_SHIFT);
- writel(IOSAPIC_RTE_HIGH(rte_index), addr + IOSAPIC_REG_SELECT);
- writel(high32, addr + IOSAPIC_WINDOW);
- writel(IOSAPIC_RTE_LOW(rte_index), addr + IOSAPIC_REG_SELECT);
- writel(low32, addr + IOSAPIC_WINDOW);
- iosapic_intr_info[vector].low32 = low32;
+ spin_lock_irqsave(&iosapic_lock, flags);
+ {
+ writel(IOSAPIC_RTE_HIGH(rte_index), addr + IOSAPIC_REG_SELECT);
+ writel(high32, addr + IOSAPIC_WINDOW);
+ writel(IOSAPIC_RTE_LOW(rte_index), addr + IOSAPIC_REG_SELECT);
+ writel(low32, addr + IOSAPIC_WINDOW);
+ iosapic_intr_info[vector].low32 = low32;
+ }
+ spin_unlock_irqrestore(&iosapic_lock, flags);
}
static void
@@ -289,7 +293,7 @@
irq &= (~IA64_IRQ_REDIRECTED);
vec = irq_to_vector(irq);
- if (cpus_empty(mask) || vec >= IA64_NUM_VECTORS)
+ if (cpus_empty(mask))
return;
dest = cpu_physical_id(first_cpu(mask));
diff -Nru a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
--- a/arch/ia64/kernel/perfmon.c Tue Apr 13 17:34:24 2004
+++ b/arch/ia64/kernel/perfmon.c Tue Apr 13 17:34:24 2004
@@ -57,7 +57,6 @@
#define PFM_CTX_LOADED 2 /* context is loaded onto a task */
#define PFM_CTX_MASKED 3 /* context is loaded but monitoring is masked due to overflow */
#define PFM_CTX_ZOMBIE 4 /* owner of the context is closing it */
-#define PFM_CTX_TERMINATED 5 /* the task the context was loaded onto is gone */
#define PFM_INVALID_ACTIVATION (~0UL)
@@ -473,6 +472,7 @@
int debug; /* turn on/off debugging via syslog */
int debug_ovfl; /* turn on/off debug printk in overflow handler */
int fastctxsw; /* turn on/off fast (unsecure) ctxsw */
+ int expert_mode; /* turn on/off value checking */
int debug_pfm_read;
} pfm_sysctl_t;
@@ -508,6 +508,7 @@
{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
{2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
{3, "fastctxsw", &pfm_sysctl.fastctxsw, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
+ {4, "expert_mode", &pfm_sysctl.expert_mode, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
{ 0, },
};
static ctl_table pfm_sysctl_dir[] = {
@@ -520,11 +521,8 @@
};
static struct ctl_table_header *pfm_sysctl_header;
-static void pfm_vm_close(struct vm_area_struct * area);
-
-static struct vm_operations_struct pfm_vm_ops={
- close: pfm_vm_close
-};
+static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
+static int pfm_flush(struct file *filp);
#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b) per_cpu(a, b)
@@ -697,6 +695,28 @@
ia64_srlz_d();
}
+static inline void
+pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
+{
+ int i;
+
+ for (i=0; i < nibrs; i++) {
+ ia64_set_ibr(i, ibrs[i]);
+ }
+ ia64_srlz_i();
+}
+
+static inline void
+pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
+{
+ int i;
+
+ for (i=0; i < ndbrs; i++) {
+ ia64_set_dbr(i, dbrs[i]);
+ }
+ ia64_srlz_d();
+}
+
/*
* PMD[i] must be a counter. no check is made
*/
@@ -827,7 +847,10 @@
{
pfm_context_t *ctx;
- /* allocate context descriptor */
+ /*
+ * allocate context descriptor
+ * must be able to free with interrupts disabled
+ */
ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL);
if (ctx) {
memset(ctx, 0, sizeof(pfm_context_t));
@@ -853,7 +876,7 @@
unsigned long mask, val, ovfl_mask;
int i;
- DPRINT_ovfl(("[%d] masking monitoring for [%d]\n", current->pid, task->pid));
+ DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));
ovfl_mask = pmu_conf.ovfl_val;
/*
@@ -997,6 +1020,15 @@
ia64_srlz_d();
/*
+ * must restore DBR/IBR because could be modified while masked
+ * XXX: need to optimize
+ */
+ if (ctx->ctx_fl_using_dbreg) {
+ pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf.num_ibrs);
+ pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf.num_dbrs);
+ }
+
+ /*
* now restore PSR
*/
if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
@@ -1106,28 +1138,6 @@
ia64_srlz_d();
}
-static inline void
-pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
-{
- int i;
-
- for (i=0; i < nibrs; i++) {
- ia64_set_ibr(i, ibrs[i]);
- }
- ia64_srlz_i();
-}
-
-static inline void
-pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
-{
- int i;
-
- for (i=0; i < ndbrs; i++) {
- ia64_set_dbr(i, dbrs[i]);
- }
- ia64_srlz_d();
-}
-
static inline int
pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
{
@@ -1684,8 +1694,7 @@
ret = pfm_do_fasync(fd, filp, ctx, on);
- DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
- current->pid,
+ DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
fd,
on,
ctx->ctx_async_queue, ret));
@@ -1707,6 +1716,8 @@
pfm_context_t *ctx = (pfm_context_t *)info;
struct pt_regs *regs = ia64_task_regs(current);
struct task_struct *owner;
+ unsigned long flags;
+ int ret;
if (ctx->ctx_cpu != smp_processor_id()) {
printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
@@ -1728,27 +1739,23 @@
return;
}
- DPRINT(("[%d] on CPU%d forcing system wide stop for [%d]\n", current->pid, smp_processor_id(), ctx->ctx_task->pid));
+ DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid));
/*
- * Update local PMU
+ * the context is already protected in pfm_close(), we simply
+ * need to mask interrupts to avoid a PMU interrupt race on
+ * this CPU
*/
- ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
- ia64_srlz_i();
- /*
- * update local cpuinfo
- */
- PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
- PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
- PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
+ local_irq_save(flags);
- pfm_clear_psr_pp();
+ ret = pfm_context_unload(ctx, NULL, 0, regs);
+ if (ret) {
+ DPRINT(("context_unload returned %d\n", ret));
+ }
/*
- * also stop monitoring in the local interrupted task
+ * unmask interrupts, PMU interrupts are now spurious here
*/
- ia64_psr(regs)->pp = 0;
-
- SET_PMU_OWNER(NULL, NULL);
+ local_irq_restore(flags);
}
static void
@@ -1756,59 +1763,38 @@
{
int ret;
- DPRINT(("[%d] calling CPU%d for cleanup\n", current->pid, ctx->ctx_cpu));
+ DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
- DPRINT(("[%d] called CPU%d for cleanup ret=%d\n", current->pid, ctx->ctx_cpu, ret));
+ DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
#endif /* CONFIG_SMP */
/*
- * called either on explicit close() or from exit_files().
- *
- * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero (fput()),i.e,
- * last task to access the file. Nobody else can access the file at this point.
- *
- * When called from exit_files(), the VMA has been freed because exit_mm()
- * is executed before exit_files().
- *
- * When called from exit_files(), the current task is not yet ZOMBIE but we will
- * flush the PMU state to the context. This means * that when we see the context
- * state as TERMINATED we are guranteed to have the latest PMU state available,
- * even if the task itself is in the middle of being ctxsw out.
+ * called for each close(). Partially free resources.
+ * When caller is self-monitoring, the context is unloaded.
*/
-static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
-
static int
-pfm_close(struct inode *inode, struct file *filp)
+pfm_flush(struct file *filp)
{
pfm_context_t *ctx;
struct task_struct *task;
struct pt_regs *regs;
- DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
unsigned long smpl_buf_size = 0UL;
void *smpl_buf_vaddr = NULL;
- void *smpl_buf_addr = NULL;
- int free_possible = 1;
int state, is_system;
- DPRINT(("pfm_close called private=%p\n", filp->private_data));
-
- if (!inode) {
- printk(KERN_ERR "pfm_close: NULL inode\n");
- return 0;
- }
-
if (PFM_IS_FILE(filp) == 0) {
- DPRINT(("bad magic for [%d]\n", current->pid));
+ DPRINT(("bad magic for\n"));
return -EBADF;
}
ctx = (pfm_context_t *)filp->private_data;
if (ctx == NULL) {
- printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
+ printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid);
return -EBADF;
}
+
/*
* remove our file from the async queue, if we use this mode.
* This can be done without the context being protected. We come
@@ -1823,7 +1809,7 @@
* signal will be sent. In both case, we are safe
*/
if (filp->f_flags & FASYNC) {
- DPRINT(("[%d] cleaning up async_queue=%p\n", current->pid, ctx->ctx_async_queue));
+ DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
pfm_do_fasync (-1, filp, ctx, 0);
}
@@ -1833,23 +1819,18 @@
is_system = ctx->ctx_fl_system;
task = PFM_CTX_TASK(ctx);
-
-
regs = ia64_task_regs(task);
- DPRINT(("[%d] ctx_state=%d is_current=%d\n",
- current->pid, state,
+ DPRINT(("ctx_state=%d is_current=%d\n",
+ state,
task == current ? 1 : 0));
- if (state == PFM_CTX_UNLOADED || state == PFM_CTX_TERMINATED) {
- goto doit;
- }
+ /*
+ * if state == UNLOADED, then task is NULL
+ */
/*
- * context still loaded/masked and self monitoring,
- * we stop/unload and we destroy right here
- *
- * We always go here for system-wide sessions
+ * we must stop and unload because we are losing access to the context.
*/
if (task == current) {
#ifdef CONFIG_SMP
@@ -1862,46 +1843,134 @@
*/
if (is_system && ctx->ctx_cpu != smp_processor_id()) {
- DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
-
- UNPROTECT_CTX(ctx, flags);
+ DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
+ /*
+ * keep context protected but unmask interrupt for IPI
+ */
+ local_irq_restore(flags);
pfm_syswide_cleanup_other_cpu(ctx);
- PROTECT_CTX(ctx, flags);
-
/*
- * short circuit pfm_context_unload();
+ * restore interrupt masking
*/
- task->thread.pfm_context = NULL;
- ctx->ctx_task = NULL;
-
- ctx->ctx_state = state = PFM_CTX_UNLOADED;
-
- pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
+ local_irq_save(flags);
+ /*
+ * context is unloaded at this point
+ */
} else
#endif /* CONFIG_SMP */
{
- DPRINT(("forcing unload on [%d]\n", current->pid));
+ DPRINT(("forcing unload\n"));
/*
* stop and unload, returning with state UNLOADED
* and session unreserved.
*/
pfm_context_unload(ctx, NULL, 0, regs);
- ctx->ctx_state = PFM_CTX_TERMINATED;
-
- DPRINT(("[%d] ctx_state=%d\n", current->pid, ctx->ctx_state));
+ DPRINT(("ctx_state=%d\n", ctx->ctx_state));
}
- goto doit;
}
/*
+ * remove virtual mapping, if any, for the calling task.
+ * cannot reset ctx field until last user is calling close().
+ *
+ * ctx_smpl_vaddr must never be cleared because it is needed
+ * by every task with access to the context
+ *
+ * When called from do_exit(), the mm context is gone already, therefore
+ * mm is NULL, i.e., the VMA is already gone and we do not have to
+ * do anything here
+ */
+ if (ctx->ctx_smpl_vaddr && current->mm) {
+ smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
+ smpl_buf_size = ctx->ctx_smpl_size;
+ }
+
+ UNPROTECT_CTX(ctx, flags);
+
+ /*
+ * if there was a mapping, then we systematically remove it
+ * at this point. Cannot be done inside critical section
+ * because some VM function reenables interrupts.
+ *
+ */
+ if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);
+
+ return 0;
+}
+/*
+ * called either on explicit close() or from exit_files().
+ * Only the LAST user of the file gets to this point, i.e., it is
+ * called only ONCE.
+ *
+ * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
+ * (fput()),i.e, last task to access the file. Nobody else can access the
+ * file at this point.
+ *
+ * When called from exit_files(), the VMA has been freed because exit_mm()
+ * is executed before exit_files().
+ *
+ * When called from exit_files(), the current task is not yet ZOMBIE but we
+ * flush the PMU state to the context.
+ */
+static int
+pfm_close(struct inode *inode, struct file *filp)
+{
+ pfm_context_t *ctx;
+ struct task_struct *task;
+ struct pt_regs *regs;
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long flags;
+ unsigned long smpl_buf_size = 0UL;
+ void *smpl_buf_addr = NULL;
+ int free_possible = 1;
+ int state, is_system;
+
+ DPRINT(("pfm_close called private=%p\n", filp->private_data));
+
+ if (PFM_IS_FILE(filp) == 0) {
+ DPRINT(("bad magic\n"));
+ return -EBADF;
+ }
+
+ ctx = (pfm_context_t *)filp->private_data;
+ if (ctx == NULL) {
+ printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
+ return -EBADF;
+ }
+
+ PROTECT_CTX(ctx, flags);
+
+ state = ctx->ctx_state;
+ is_system = ctx->ctx_fl_system;
+
+ task = PFM_CTX_TASK(ctx);
+ regs = ia64_task_regs(task);
+
+ DPRINT(("ctx_state=%d is_current=%d\n",
+ state,
+ task == current ? 1 : 0));
+
+ /*
+ * if task == current, then pfm_flush() unloaded the context
+ */
+ if (state == PFM_CTX_UNLOADED) goto doit;
+
+ /*
+ * context is loaded/masked and task != current, we need to
+ * either force an unload or go zombie
+ */
+
+ /*
* The task is currently blocked or will block after an overflow.
* we must force it to wakeup to get out of the
- * MASKED state and transition to the unloaded state by itself
+ * MASKED state and transition to the unloaded state by itself.
+ *
+ * This situation is only possible for per-task mode
*/
if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
@@ -1911,7 +1980,7 @@
*
* We cannot use the ZOMBIE state, because it is checked
* by pfm_load_regs() which is called upon wakeup from down().
- * In such cas, it would free the context and then we would
+ * In such case, it would free the context and then we would
* return to pfm_handle_work() which would access the
* stale context. Instead, we set a flag invisible to pfm_load_regs()
* but visible to pfm_handle_work().
@@ -1926,7 +1995,7 @@
*/
up(&ctx->ctx_restart_sem);
- DPRINT(("waking up ctx_state=%d for [%d]\n", state, current->pid));
+ DPRINT(("waking up ctx_state=%d\n", state));
/*
* put ourself to sleep waiting for the other
@@ -1956,11 +2025,11 @@
set_current_state(TASK_RUNNING);
/*
- * context is terminated at this point
+ * context is unloaded at this point
*/
- DPRINT(("after zombie wakeup ctx_state=%d for [%d]\n", state, current->pid));
+ DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
}
- else {
+ else if (task != current) {
#ifdef CONFIG_SMP
/*
* switch context to zombie state
@@ -1978,8 +2047,7 @@
#endif
}
-doit: /* cannot assume task is defined from now on */
-
+doit:
/* reload state, may have changed during opening of critical section */
state = ctx->ctx_state;
@@ -1987,18 +2055,9 @@
* the context is still attached to a task (possibly current)
* we cannot destroy it right now
*/
- /*
- * remove virtual mapping, if any. will be NULL when
- * called from exit_files().
- */
- if (ctx->ctx_smpl_vaddr) {
- smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
- smpl_buf_size = ctx->ctx_smpl_size;
- ctx->ctx_smpl_vaddr = NULL;
- }
/*
- * we must fre the sampling buffer right here because
+ * we must free the sampling buffer right here because
* we cannot rely on it being cleaned up later by the
* monitored task. It is not possible to free vmalloc'ed
* memory in pfm_load_regs(). Instead, we remove the buffer
@@ -2011,21 +2070,19 @@
smpl_buf_size = ctx->ctx_smpl_size;
/* no more sampling */
ctx->ctx_smpl_hdr = NULL;
+ ctx->ctx_fl_is_sampling = 0;
}
- DPRINT(("[%d] ctx_state=%d free_possible=%d vaddr=%p addr=%p size=%lu\n",
- current->pid,
+ DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
state,
free_possible,
- smpl_buf_vaddr,
smpl_buf_addr,
smpl_buf_size));
if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);
/*
- * UNLOADED and TERMINATED mean that the session has already been
- * unreserved.
+ * UNLOADED that the session has already been unreserved.
*/
if (state == PFM_CTX_ZOMBIE) {
pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
@@ -2047,14 +2104,9 @@
UNPROTECT_CTX(ctx, flags);
/*
- * if there was a mapping, then we systematically remove it
- * at this point. Cannot be done inside critical section
- * because some VM function reenables interrupts.
- *
* All memory free operations (especially for vmalloc'ed memory)
* MUST be done with interrupts ENABLED.
*/
- if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);
if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
/*
@@ -2072,6 +2124,8 @@
return -ENXIO;
}
+
+
static struct file_operations pfm_file_ops = {
.llseek = pfm_lseek,
.read = pfm_read,
@@ -2080,7 +2134,8 @@
.ioctl = pfm_ioctl,
.open = pfm_no_open, /* special open code to disallow open via /proc */
.fasync = pfm_fasync,
- .release = pfm_close
+ .release = pfm_close,
+ .flush = pfm_flush
};
static int
@@ -2088,6 +2143,7 @@
{
return 1;
}
+
static struct dentry_operations pfmfs_dentry_operations = {
.d_delete = pfmfs_delete_dentry,
};
@@ -2172,27 +2228,6 @@
put_unused_fd(fd);
}
-/*
- * This function gets called from mm/mmap.c:exit_mmap() only when there is a sampling buffer
- * attached to the context AND the current task has a mapping for it, i.e., it is the original
- * creator of the context.
- *
- * This function is used to remember the fact that the vma describing the sampling buffer
- * has now been removed. It can only be called when no other tasks share the same mm context.
- *
- */
-static void
-pfm_vm_close(struct vm_area_struct *vma)
-{
- pfm_context_t *ctx = (pfm_context_t *)vma->vm_private_data;
- unsigned long flags;
-
- PROTECT_CTX(ctx, flags);
- ctx->ctx_smpl_vaddr = NULL;
- UNPROTECT_CTX(ctx, flags);
- DPRINT(("[%d] clearing vaddr for ctx %p\n", current->pid, ctx));
-}
-
static int
pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{
@@ -2252,7 +2287,7 @@
return -ENOMEM;
}
- DPRINT(("[%d] smpl_buf @%p\n", current->pid, smpl_buf));
+ DPRINT(("smpl_buf @%p\n", smpl_buf));
/* allocate vma */
vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
@@ -2268,12 +2303,12 @@
* what we want.
*/
vma->vm_mm = mm;
- vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED|VM_DONTCOPY;
+ vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED;
vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
- vma->vm_ops = &pfm_vm_ops;
+ vma->vm_ops = NULL;
vma->vm_pgoff = 0;
vma->vm_file = NULL;
- vma->vm_private_data = ctx; /* information needed by the pfm_vm_close() function */
+ vma->vm_private_data = NULL;
/*
* Now we have everything we need and we can initialize
@@ -2342,8 +2377,7 @@
pfm_bad_permissions(struct task_struct *task)
{
/* inspired by ptrace_attach() */
- DPRINT(("[%d] cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
- current->pid,
+ DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
current->uid,
current->gid,
task->euid,
@@ -2532,11 +2566,11 @@
* no kernel task or task not owner by caller
*/
if (task->mm == NULL) {
- DPRINT(("[%d] task [%d] has not memory context (kernel thread)\n", current->pid, task->pid));
+ DPRINT(("task [%d] has not memory context (kernel thread)\n", task->pid));
return -EPERM;
}
if (pfm_bad_permissions(task)) {
- DPRINT(("[%d] no permission to attach to [%d]\n", current->pid, task->pid));
+ DPRINT(("no permission to attach to [%d]\n", task->pid));
return -EPERM;
}
/*
@@ -2548,7 +2582,7 @@
}
if (task->state == TASK_ZOMBIE) {
- DPRINT(("[%d] cannot attach to zombie task [%d]\n", current->pid, task->pid));
+ DPRINT(("cannot attach to zombie task [%d]\n", task->pid));
return -EBUSY;
}
@@ -2558,7 +2592,7 @@
if (task == current) return 0;
if (task->state != TASK_STOPPED) {
- DPRINT(("[%d] cannot attach to non-stopped task [%d] state=%ld\n", current->pid, task->pid, task->state));
+ DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
return -EBUSY;
}
/*
@@ -2835,7 +2869,7 @@
task = ctx->ctx_task;
impl_pmds = pmu_conf.impl_pmds[0];
- if (state == PFM_CTX_TERMINATED || state == PFM_CTX_ZOMBIE) return -EINVAL;
+ if (state == PFM_CTX_ZOMBIE) return -EINVAL;
if (is_loaded) {
thread = &task->thread;
@@ -2845,7 +2879,7 @@
* It does not have to be the owner (ctx_task) of the context per se.
*/
if (is_system && ctx->ctx_cpu != smp_processor_id()) {
- DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+ DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
return -EBUSY;
}
can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
@@ -2928,7 +2962,7 @@
/*
* execute write checker, if any
*/
- if (PMC_WR_FUNC(cnum)) {
+ if (pfm_sysctl.expert_mode == 0 && PMC_WR_FUNC(cnum)) {
ret = PMC_WR_FUNC(cnum)(task, ctx, cnum, &value, regs);
if (ret) goto error;
ret = -EINVAL;
@@ -3072,7 +3106,7 @@
ovfl_mask = pmu_conf.ovfl_val;
task = ctx->ctx_task;
- if (unlikely(state == PFM_CTX_TERMINATED || state == PFM_CTX_ZOMBIE)) return -EINVAL;
+ if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
/*
* on both UP and SMP, we can only write to the PMC when the task is
@@ -3086,7 +3120,7 @@
* It does not have to be the owner (ctx_task) of the context per se.
*/
if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
- DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+ DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
return -EBUSY;
}
can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
@@ -3106,7 +3140,7 @@
/*
* execute write checker, if any
*/
- if (PMD_WR_FUNC(cnum)) {
+ if (pfm_sysctl.expert_mode == 0 && PMD_WR_FUNC(cnum)) {
unsigned long v = value;
ret = PMD_WR_FUNC(cnum)(task, ctx, cnum, &v, regs);
@@ -3279,7 +3313,7 @@
* It does not have to be the owner (ctx_task) of the context per se.
*/
if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
- DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+ DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
return -EBUSY;
}
/*
@@ -3347,7 +3381,7 @@
/*
* execute read checker, if any
*/
- if (unlikely(PMD_RD_FUNC(cnum))) {
+ if (unlikely(pfm_sysctl.expert_mode == 0 && PMD_RD_FUNC(cnum))) {
unsigned long v = val;
ret = PMD_RD_FUNC(cnum)(ctx->ctx_task, ctx, cnum, &v, regs);
if (ret) goto error;
@@ -3376,14 +3410,14 @@
return ret;
}
-long
-pfm_mod_write_pmcs(struct task_struct *task, pfarg_reg_t *req, unsigned int nreq, struct pt_regs *regs)
+int
+pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
pfm_context_t *ctx;
- if (task == NULL || req == NULL) return -EINVAL;
+ if (req == NULL) return -EINVAL;
- ctx = task->thread.pfm_context;
+ ctx = GET_PMU_CTX();
if (ctx == NULL) return -EINVAL;
@@ -3391,20 +3425,19 @@
* for now limit to current task, which is enough when calling
* from overflow handler
*/
- if (task != current) return -EBUSY;
+ if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
return pfm_write_pmcs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_pmcs);
-long
-pfm_mod_read_pmds(struct task_struct *task, pfarg_reg_t *req, unsigned int nreq, struct pt_regs *regs)
+int
+pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
pfm_context_t *ctx;
- if (task == NULL || req == NULL) return -EINVAL;
+ if (req == NULL) return -EINVAL;
- //ctx = task->thread.pfm_context;
ctx = GET_PMU_CTX();
if (ctx == NULL) return -EINVAL;
@@ -3419,48 +3452,6 @@
}
EXPORT_SYMBOL(pfm_mod_read_pmds);
-long
-pfm_mod_fast_read_pmds(struct task_struct *task, unsigned long mask[4], unsigned long *addr, struct pt_regs *regs)
-{
- pfm_context_t *ctx;
- unsigned long m, val;
- unsigned int j;
-
- if (task == NULL || addr == NULL) return -EINVAL;
-
- //ctx = task->thread.pfm_context;
- ctx = GET_PMU_CTX();
-
- if (ctx == NULL) return -EINVAL;
-
- /*
- * for now limit to current task, which is enough when calling
- * from overflow handler
- */
- if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
-
- m = mask[0];
- for (j=0; m; m >>=1, j++) {
-
- if ((m & 0x1) == 0) continue;
-
- if (!(PMD_IS_IMPL(j) && CTX_IS_USED_PMD(ctx, j)) ) return -EINVAL;
-
- if (PMD_IS_COUNTING(j)) {
- val = pfm_read_soft_counter(ctx, j);
- } else {
- val = ia64_get_pmd(j);
- }
-
- *addr++ = val;
-
- /* XXX: should call read checker routine? */
- DPRINT(("single_read_pmd[%u]=0x%lx\n", j, val));
- }
- return 0;
-}
-EXPORT_SYMBOL(pfm_mod_fast_read_pmds);
-
/*
* Only call this function when a process it trying to
* write the debug registers (reading is always allowed)
@@ -3565,9 +3556,6 @@
case PFM_CTX_ZOMBIE:
DPRINT(("invalid state=%d\n", state));
return -EBUSY;
- case PFM_CTX_TERMINATED:
- DPRINT(("context is terminated, nothing to do\n"));
- return 0;
default:
DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
return -EINVAL;
@@ -3579,7 +3567,7 @@
* It does not have to be the owner (ctx_task) of the context per se.
*/
if (is_system && ctx->ctx_cpu != smp_processor_id()) {
- DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+ DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
return -EBUSY;
}
@@ -3739,7 +3727,7 @@
is_system = ctx->ctx_fl_system;
task = ctx->ctx_task;
- if (state == PFM_CTX_TERMINATED || state == PFM_CTX_ZOMBIE) return -EINVAL;
+ if (state == PFM_CTX_ZOMBIE) return -EINVAL;
/*
* on both UP and SMP, we can only write to the PMC when the task is
@@ -3753,7 +3741,7 @@
* It does not have to be the owner (ctx_task) of the context per se.
*/
if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
- DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+ DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
return -EBUSY;
}
can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
@@ -3920,6 +3908,49 @@
return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
}
+int
+pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
+{
+ pfm_context_t *ctx;
+
+ if (req == NULL) return -EINVAL;
+
+ ctx = GET_PMU_CTX();
+
+ if (ctx == NULL) return -EINVAL;
+
+ /*
+ * for now limit to current task, which is enough when calling
+ * from overflow handler
+ */
+ if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
+
+ return pfm_write_ibrs(ctx, req, nreq, regs);
+}
+EXPORT_SYMBOL(pfm_mod_write_ibrs);
+
+int
+pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
+{
+ pfm_context_t *ctx;
+
+ if (req == NULL) return -EINVAL;
+
+ ctx = GET_PMU_CTX();
+
+ if (ctx == NULL) return -EINVAL;
+
+ /*
+ * for now limit to current task, which is enough when calling
+ * from overflow handler
+ */
+ if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
+
+ return pfm_write_dbrs(ctx, req, nreq, regs);
+}
+EXPORT_SYMBOL(pfm_mod_write_dbrs);
+
+
static int
pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
@@ -3947,11 +3978,10 @@
* It does not have to be the owner (ctx_task) of the context per se.
*/
if (is_system && ctx->ctx_cpu != smp_processor_id()) {
- DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+ DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
return -EBUSY;
}
- DPRINT(("current [%d] task [%d] ctx_state=%d is_system=%d\n",
- current->pid,
+ DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
PFM_CTX_TASK(ctx)->pid,
state,
is_system));
@@ -4010,7 +4040,7 @@
* monitoring disabled in kernel at next reschedule
*/
ctx->ctx_saved_psr_up = 0;
- DPRINT(("pfm_stop: current [%d] task=[%d]\n", current->pid, task->pid));
+ DPRINT(("task=[%d]\n", task->pid));
}
return 0;
}
@@ -4033,7 +4063,7 @@
* It does not have to be the owner (ctx_task) of the context per se.
*/
if (is_system && ctx->ctx_cpu != smp_processor_id()) {
- DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+ DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
return -EBUSY;
}
@@ -4167,9 +4197,8 @@
/*
* can only load from unloaded or terminated state
*/
- if (state != PFM_CTX_UNLOADED && state != PFM_CTX_TERMINATED) {
- DPRINT(("[%d] cannot load to [%d], invalid ctx_state=%d\n",
- current->pid,
+ if (state != PFM_CTX_UNLOADED) {
+ DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
req->load_pid,
ctx->ctx_state));
return -EINVAL;
@@ -4178,7 +4207,7 @@
DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
- DPRINT(("cannot use blocking mode on self for [%d]\n", current->pid));
+ DPRINT(("cannot use blocking mode on self\n"));
return -EINVAL;
}
@@ -4194,8 +4223,7 @@
* system wide is self monitoring only
*/
if (is_system && task != current) {
- DPRINT(("system wide is self monitoring only current=%d load_pid=%d\n",
- current->pid,
+ DPRINT(("system wide is self monitoring only load_pid=%d\n",
req->load_pid));
goto error;
}
@@ -4264,8 +4292,7 @@
*
* XXX: needs to be atomic
*/
- DPRINT(("[%d] before cmpxchg() old_ctx=%p new_ctx=%p\n",
- current->pid,
+ DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
thread->pfm_context, ctx));
old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
@@ -4409,19 +4436,19 @@
{
struct task_struct *task = PFM_CTX_TASK(ctx);
struct pt_regs *tregs;
- int state, is_system;
+ int prev_state, is_system;
int ret;
DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
- state = ctx->ctx_state;
- is_system = ctx->ctx_fl_system;
+ prev_state = ctx->ctx_state;
+ is_system = ctx->ctx_fl_system;
/*
* unload only when necessary
*/
- if (state == PFM_CTX_TERMINATED || state == PFM_CTX_UNLOADED) {
- DPRINT(("[%d] ctx_state=%d, nothing to do\n", current->pid, ctx->ctx_state));
+ if (prev_state == PFM_CTX_UNLOADED) {
+ DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
return 0;
}
@@ -4431,7 +4458,7 @@
ret = pfm_stop(ctx, NULL, 0, regs);
if (ret) return ret;
- ctx->ctx_state = state = PFM_CTX_UNLOADED;
+ ctx->ctx_state = PFM_CTX_UNLOADED;
/*
* in system mode, we need to update the PMU directly
@@ -4458,7 +4485,8 @@
* at this point we are done with the PMU
* so we can unreserve the resource.
*/
- pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
+ if (prev_state != PFM_CTX_ZOMBIE)
+ pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
/*
* disconnect context from task
@@ -4497,8 +4525,11 @@
/*
* at this point we are done with the PMU
* so we can unreserve the resource.
+ *
+ * when state was ZOMBIE, we have already unreserved.
*/
- pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);
+ if (prev_state != PFM_CTX_ZOMBIE)
+ pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);
/*
* reset activation counter and psr
@@ -4549,12 +4580,14 @@
task->thread.pfm_context = NULL;
task->thread.flags &= ~IA64_THREAD_PM_VALID;
- DPRINT(("context <%d> force cleanup for [%d] by [%d]\n", ctx->ctx_fd, task->pid, current->pid));
+ DPRINT(("force cleanup for [%d]\n", task->pid));
}
+
/*
* called only from exit_thread(): task == current
+ * we come here only if current has a context attached (loaded or masked)
*/
void
pfm_exit_thread(struct task_struct *task)
@@ -4575,7 +4608,8 @@
switch(state) {
case PFM_CTX_UNLOADED:
/*
- * come here only if attached
+ * only comes to this function if pfm_context is not NULL, i.e., cannot
+ * be in unloaded state
*/
printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
break;
@@ -4583,20 +4617,17 @@
case PFM_CTX_MASKED:
ret = pfm_context_unload(ctx, NULL, 0, regs);
if (ret) {
- printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, ctx->ctx_state, ret);
+ printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
}
- ctx->ctx_state = PFM_CTX_TERMINATED;
- DPRINT(("ctx terminated by [%d]\n", task->pid));
+ DPRINT(("ctx unloaded for current state was %d\n", state));
pfm_end_notify_user(ctx);
break;
case PFM_CTX_ZOMBIE:
- pfm_clear_psr_up();
-
- BUG_ON(ctx->ctx_smpl_hdr);
-
- pfm_force_cleanup(ctx, regs);
-
+ ret = pfm_context_unload(ctx, NULL, 0, regs);
+ if (ret) {
+ printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
+ }
free_ok = 1;
break;
default:
@@ -4696,7 +4727,7 @@
if (task == current || ctx->ctx_fl_system) return 0;
/*
- * context is UNLOADED, MASKED, TERMINATED we are safe to go
+ * context is UNLOADED, MASKED we are safe to go
*/
if (state != PFM_CTX_LOADED) return 0;
@@ -4749,7 +4780,7 @@
if (unlikely(PFM_IS_DISABLED())) return -ENOSYS;
if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
- DPRINT(("[%d] invalid cmd=%d\n", current->pid, cmd));
+ DPRINT(("invalid cmd=%d\n", cmd));
return -EINVAL;
}
@@ -4760,7 +4791,7 @@
cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
if (unlikely(func == NULL)) {
- DPRINT(("[%d] invalid cmd=%d\n", current->pid, cmd));
+ DPRINT(("invalid cmd=%d\n", cmd));
return -EINVAL;
}
@@ -4803,7 +4834,7 @@
* assume sz = 0 for command without parameters
*/
if (sz && copy_from_user(args_k, arg, sz)) {
- DPRINT(("[%d] cannot copy_from_user %lu bytes @%p\n", current->pid, sz, arg));
+ DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
goto error_args;
}
@@ -4819,7 +4850,7 @@
completed_args = 1;
- DPRINT(("[%d] restart_args sz=%lu xtra_sz=%lu\n", current->pid, sz, xtra_sz));
+ DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
/* retry if necessary */
if (likely(xtra_sz)) goto restart_args;
@@ -4831,17 +4862,17 @@
file = fget(fd);
if (unlikely(file == NULL)) {
- DPRINT(("[%d] invalid fd %d\n", current->pid, fd));
+ DPRINT(("invalid fd %d\n", fd));
goto error_args;
}
if (unlikely(PFM_IS_FILE(file) == 0)) {
- DPRINT(("[%d] fd %d not related to perfmon\n", current->pid, fd));
+ DPRINT(("fd %d not related to perfmon\n", fd));
goto error_args;
}
ctx = (pfm_context_t *)file->private_data;
if (unlikely(ctx == NULL)) {
- DPRINT(("[%d] no context for fd %d\n", current->pid, fd));
+ DPRINT(("no context for fd %d\n", fd));
goto error_args;
}
prefetch(&ctx->ctx_state);
@@ -4861,7 +4892,7 @@
abort_locked:
if (likely(ctx)) {
- DPRINT(("[%d] context unlocked\n", current->pid));
+ DPRINT(("context unlocked\n"));
UNPROTECT_CTX(ctx, flags);
fput(file);
}
@@ -4945,12 +4976,7 @@
current->thread.flags &= ~IA64_THREAD_PM_VALID;
ctx->ctx_task = NULL;
- /*
- * switch to terminated state
- */
- ctx->ctx_state = PFM_CTX_TERMINATED;
-
- DPRINT(("context <%d> terminated for [%d]\n", ctx->ctx_fd, current->pid));
+ DPRINT(("context terminated\n"));
/*
* and wakeup controlling task, indicating we are now disconnected
@@ -4995,15 +5021,15 @@
*/
reason = ctx->ctx_fl_trap_reason;
ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
+ ovfl_regs = ctx->ctx_ovfl_regs[0];
- DPRINT(("[%d] reason=%d state=%d\n", current->pid, reason, ctx->ctx_state));
+ DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
/*
- * must be done before we check non-blocking mode
+ * must be done before we check for simple-reset mode
*/
if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;
- ovfl_regs = ctx->ctx_ovfl_regs[0];
//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
@@ -5022,6 +5048,14 @@
PROTECT_CTX(ctx, flags);
+ /*
+ * we need to read the ovfl_regs only after wake-up
+ * because we may have had pfm_write_pmds() in between
+ * and that can changed PMD values and therefore
+ * ovfl_regs is reset for these new PMD values.
+ */
+ ovfl_regs = ctx->ctx_ovfl_regs[0];
+
if (ctx->ctx_fl_going_zombie) {
do_zombie:
DPRINT(("context is zombie, bailing out\n"));
@@ -5050,7 +5084,7 @@
return 0;
}
- DPRINT(("[%d] waking up somebody\n", current->pid));
+ DPRINT(("waking up somebody\n"));
if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
@@ -5085,11 +5119,10 @@
msg->pfm_ovfl_msg.msg_tstamp = 0UL;
}
- DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d pid=%d ovfl_pmds=0x%lx\n",
+ DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
msg,
ctx->ctx_fl_no_msg,
ctx->ctx_fd,
- current->pid,
ovfl_pmds));
return pfm_notify_user(ctx, msg);
@@ -5112,10 +5145,10 @@
msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
msg->pfm_ovfl_msg.msg_tstamp = 0UL;
- DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d pid=%d\n",
+ DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
msg,
ctx->ctx_fl_no_msg,
- ctx->ctx_fd, current->pid));
+ ctx->ctx_fd));
return pfm_notify_user(ctx, msg);
}
@@ -5275,8 +5308,7 @@
* when the module cannot handle the rest of the overflows, we abort right here
*/
if (ret && pmd_mask) {
- DPRINT(("current [%d] handler aborts leftover ovfl_pmds=0x%lx\n",
- current->pid,
+ DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
 			pmd_mask<<PMU_FIRST_COUNTER));
 	}
- DPRINT(("[%d] ovfl_pmds=0x%lx reset_pmds=0x%lx\n",
- current->pid,
+ DPRINT(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n",
ovfl_pmds,
reset_pmds));
/*
@@ -5341,8 +5372,7 @@
must_notify = 1;
}
- DPRINT_ovfl(("current [%d] owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
- current->pid,
+ DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
PFM_GET_WORK_PENDING(task),
ctx->ctx_fl_trap_reason,
@@ -5521,6 +5551,7 @@
p += sprintf(p, "perfmon version : %u.%u\n", PFM_VERSION_MAJ, PFM_VERSION_MIN);
p += sprintf(p, "model : %s\n", pmu_conf.pmu_name);
p += sprintf(p, "fastctxsw : %s\n", pfm_sysctl.fastctxsw > 0 ? "Yes": "No");
+ p += sprintf(p, "expert mode : %s\n", pfm_sysctl.expert_mode > 0 ? "Yes": "No");
p += sprintf(p, "ovfl_mask : 0x%lx\n", pmu_conf.ovfl_val);
for(i=0; i < NR_CPUS; i++) {
@@ -6490,7 +6521,7 @@
{
struct thread_struct *thread;
- DPRINT(("perfmon: pfm_inherit clearing state for [%d] current [%d]\n", task->pid, current->pid));
+ DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task->pid));
thread = &task->thread;
diff -Nru a/arch/ia64/kernel/perfmon_mckinley.h b/arch/ia64/kernel/perfmon_mckinley.h
--- a/arch/ia64/kernel/perfmon_mckinley.h Tue Apr 13 17:34:24 2004
+++ b/arch/ia64/kernel/perfmon_mckinley.h Tue Apr 13 17:34:24 2004
@@ -101,6 +101,7 @@
{
int ret = 0, check_case1 = 0;
unsigned long val8 = 0, val14 = 0, val13 = 0;
+ int is_loaded;
/* first preserve the reserved fields */
pfm_mck_reserved(cnum, val, regs);
@@ -108,6 +109,8 @@
/* sanitfy check */
if (ctx == NULL) return -EINVAL;
+ is_loaded = ctx->ctx_state == PFM_CTX_LOADED || ctx->ctx_state == PFM_CTX_MASKED;
+
/*
* we must clear the debug registers if pmc13 has a value which enable
* memory pipeline event constraints. In this case we need to clear the
@@ -120,7 +123,9 @@
*
* For now, we just check on cfg_dbrXX != 0x3.
*/
- if (cnum == 13 && ((*val & 0x18181818UL) != 0x18181818UL) && ctx->ctx_fl_using_dbreg == 0) {
+ DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, *val, ctx->ctx_fl_using_dbreg, is_loaded));
+
+ if (cnum == 13 && is_loaded && ((*val & 0x18181818UL) != 0x18181818UL) && ctx->ctx_fl_using_dbreg == 0) {
DPRINT(("pmc[%d]=0x%lx has active pmc13 settings, clearing dbr\n", cnum, *val));
@@ -131,14 +136,14 @@
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
- ret = pfm_write_ibr_dbr(1, ctx, NULL, 0, regs);
+ ret = pfm_write_ibr_dbr(PFM_DATA_RR, ctx, NULL, 0, regs);
if (ret) return ret;
}
/*
* we must clear the (instruction) debug registers if any pmc14.ibrpX bit is enabled
* before they are (fl_using_dbreg==0) to avoid picking up stale information.
*/
- if (cnum == 14 && ((*val & 0x2222UL) != 0x2222UL) && ctx->ctx_fl_using_dbreg == 0) {
+ if (cnum == 14 && is_loaded && ((*val & 0x2222UL) != 0x2222UL) && ctx->ctx_fl_using_dbreg == 0) {
DPRINT(("pmc[%d]=0x%lx has active pmc14 settings, clearing ibr\n", cnum, *val));
@@ -149,7 +154,7 @@
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
- ret = pfm_write_ibr_dbr(0, ctx, NULL, 0, regs);
+ ret = pfm_write_ibr_dbr(PFM_CODE_RR, ctx, NULL, 0, regs);
if (ret) return ret;
}
diff -Nru a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
--- a/arch/ia64/kernel/signal.c Tue Apr 13 17:34:23 2004
+++ b/arch/ia64/kernel/signal.c Tue Apr 13 17:34:23 2004
@@ -445,6 +445,7 @@
scr->pt.ar_fpsr = FPSR_DEFAULT; /* reset fpsr for signal handler */
scr->pt.cr_iip = tramp_addr;
ia64_psr(&scr->pt)->ri = 0; /* start executing in first slot */
+ ia64_psr(&scr->pt)->be = 0; /* force little-endian byte-order */
/*
* Force the interruption function mask to zero. This has no effect when a
* system-call got interrupted by a signal (since, in that case, scr->pt_cr_ifs is
diff -Nru a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
--- a/arch/ia64/kernel/sys_ia64.c Tue Apr 13 17:34:23 2004
+++ b/arch/ia64/kernel/sys_ia64.c Tue Apr 13 17:34:23 2004
@@ -201,9 +201,15 @@
* A zero mmap always succeeds in Linux, independent of whether or not the
* remaining arguments are valid.
*/
- len = PAGE_ALIGN(len);
if (len == 0)
goto out;
+
+ /* Careful about overflows.. */
+ len = PAGE_ALIGN(len);
+ if (!len || len > TASK_SIZE) {
+ addr = -EINVAL;
+ goto out;
+ }
/*
* Don't permit mappings into unmapped space, the virtual page table of a region,
diff -Nru a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
--- a/arch/ia64/mm/hugetlbpage.c Tue Apr 13 17:34:23 2004
+++ b/arch/ia64/mm/hugetlbpage.c Tue Apr 13 17:34:23 2004
@@ -9,6 +9,7 @@
*/
#include
+#include
#include
#include
#include
diff -Nru a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
--- a/arch/ia64/pci/pci.c Tue Apr 13 17:34:23 2004
+++ b/arch/ia64/pci/pci.c Tue Apr 13 17:34:23 2004
@@ -53,56 +53,33 @@
* synchronization mechanism here.
*/
-#define PCI_SAL_ADDRESS(seg, bus, devfn, reg) \
- ((u64)(seg << 24) | (u64)(bus << 16) | \
+#define PCI_SAL_ADDRESS(seg, bus, devfn, reg) \
+ ((u64)(seg << 24) | (u64)(bus << 16) | \
(u64)(devfn << 8) | (u64)(reg))
-static int
-pci_sal_read (int seg, int bus, int devfn, int reg, int len, u32 *value)
-{
- int result = 0;
- u64 data = 0;
-
- if ((seg > 255) || (bus > 255) || (devfn > 255) || (reg > 255))
- return -EINVAL;
-
- result = ia64_sal_pci_config_read(PCI_SAL_ADDRESS(seg, bus, devfn, reg), 0, len, &data);
-
- *value = (u32) data;
-
- return result;
-}
-
-static int
-pci_sal_write (int seg, int bus, int devfn, int reg, int len, u32 value)
-{
- if ((seg > 255) || (bus > 255) || (devfn > 255) || (reg > 255))
- return -EINVAL;
-
- return ia64_sal_pci_config_write(PCI_SAL_ADDRESS(seg, bus, devfn, reg), 0, len, value);
-}
-
-static struct pci_raw_ops pci_sal_ops = {
- .read = pci_sal_read,
- .write = pci_sal_write
-};
-
/* SAL 3.2 adds support for extended config space. */
-#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg) \
- ((u64)(seg << 28) | (u64)(bus << 20) | \
+#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg) \
+ ((u64)(seg << 28) | (u64)(bus << 20) | \
(u64)(devfn << 12) | (u64)(reg))
static int
-pci_sal_ext_read (int seg, int bus, int devfn, int reg, int len, u32 *value)
+pci_sal_read (int seg, int bus, int devfn, int reg, int len, u32 *value)
{
+ u64 addr, mode, data = 0;
int result = 0;
- u64 data = 0;
- if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
+ if ((seg > 255) || (bus > 255) || (devfn > 255) || (reg > 4095))
return -EINVAL;
- result = ia64_sal_pci_config_read(PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg), 1, len, &data);
+ if ((seg | reg) <= 255) {
+ addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
+ mode = 0;
+ } else {
+ addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
+ mode = 1;
+ }
+ result = ia64_sal_pci_config_read(addr, mode, len, &data);
*value = (u32) data;
@@ -110,46 +87,42 @@
}
static int
-pci_sal_ext_write (int seg, int bus, int devfn, int reg, int len, u32 value)
+pci_sal_write (int seg, int bus, int devfn, int reg, int len, u32 value)
{
+ u64 addr, mode;
+
if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
return -EINVAL;
- return ia64_sal_pci_config_write(PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg), 1, len, value);
-}
-
-static struct pci_raw_ops pci_sal_ext_ops = {
- .read = pci_sal_ext_read,
- .write = pci_sal_ext_write
-};
-
-struct pci_raw_ops *raw_pci_ops = &pci_sal_ops; /* default to SAL < 3.2 */
-
-static int __init
-pci_set_sal_ops (void)
-{
- if (sal_revision >= SAL_VERSION_CODE(3, 2)) {
- printk("Using SAL 3.2 to access PCI config space\n");
- raw_pci_ops = &pci_sal_ext_ops;
+ if ((seg | reg) <= 255) {
+ addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
+ mode = 0;
+ } else {
+ addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
+ mode = 1;
}
- return 0;
+ return ia64_sal_pci_config_write(addr, mode, len, value);
}
-arch_initcall(pci_set_sal_ops);
+static struct pci_raw_ops pci_sal_ops = {
+ .read = pci_sal_read,
+ .write = pci_sal_write
+};
+struct pci_raw_ops *raw_pci_ops = &pci_sal_ops;
static int
pci_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
return raw_pci_ops->read(pci_domain_nr(bus), bus->number,
- devfn, where, size, value);
+ devfn, where, size, value);
}
static int
pci_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
return raw_pci_ops->write(pci_domain_nr(bus), bus->number,
- devfn, where, size, value);
+ devfn, where, size, value);
}
static struct pci_ops pci_root_ops = {
@@ -382,8 +355,7 @@
for (ln = b->devices.next; ln != &b->devices; ln = ln->next)
pcibios_fixup_device_resources(pci_dev_b(ln), b);
-
- return;
+ pci_read_bridge_bases(b);
}
void __devinit
@@ -441,7 +413,6 @@
if (ret < 0)
return ret;
- printk(KERN_INFO "PCI: Found IRQ %d for device %s\n", dev->irq, pci_name(dev));
return acpi_pci_irq_enable(dev);
}
diff -Nru a/arch/ia64/sn/io/hwgfs/interface.c b/arch/ia64/sn/io/hwgfs/interface.c
--- a/arch/ia64/sn/io/hwgfs/interface.c Tue Apr 13 17:34:24 2004
+++ b/arch/ia64/sn/io/hwgfs/interface.c Tue Apr 13 17:34:24 2004
@@ -36,7 +36,7 @@
memcpy(buf, *path, len);
buf[len] = '\0';
- error = link_path_walk(buf, nd);
+ error = path_walk(buf, nd);
if (unlikely(error))
return error;
@@ -83,7 +83,7 @@
if (unlikely(error))
return error;
- error = link_path_walk(name, &nd);
+ error = path_walk(name, &nd);
if (unlikely(error))
return error;
@@ -274,7 +274,7 @@
nd.dentry = dget(base ? base : hwgfs_vfsmount->mnt_sb->s_root);
nd.flags = (traverse_symlinks ? LOOKUP_FOLLOW : 0);
- error = link_path_walk(name, &nd);
+ error = path_walk(name, &nd);
if (likely(!error)) {
dentry = nd.dentry;
path_release(&nd); /* stale data from here! */
diff -Nru a/arch/ia64/sn/io/machvec/pci_bus_cvlink.c b/arch/ia64/sn/io/machvec/pci_bus_cvlink.c
--- a/arch/ia64/sn/io/machvec/pci_bus_cvlink.c Tue Apr 13 17:34:24 2004
+++ b/arch/ia64/sn/io/machvec/pci_bus_cvlink.c Tue Apr 13 17:34:24 2004
@@ -811,7 +811,6 @@
/*
* set pci_raw_ops, etc.
*/
-
sgi_master_io_infr_init();
for (cnode = 0; cnode < numnodes; cnode++) {
@@ -826,16 +825,16 @@
#endif
controller = kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
- if (controller) {
- memset(controller, 0, sizeof(struct pci_controller));
- /* just allocate some devices and fill in the pci_dev structs */
- for (i = 0; i < PCI_BUSES_TO_SCAN; i++)
- pci_scan_bus(i, &sn_pci_ops, controller);
+ if (!controller) {
+ printk(KERN_WARNING "cannot allocate PCI controller\n");
+ return 0;
}
- /*
- * actually find devices and fill in hwgraph structs
- */
+ memset(controller, 0, sizeof(struct pci_controller));
+
+ for (i = 0; i < PCI_BUSES_TO_SCAN; i++)
+ if (pci_bus_to_vertex(i))
+ pci_scan_bus(i, &sn_pci_ops, controller);
done_probing = 1;
@@ -857,13 +856,8 @@
* set the root start and end so that drivers calling check_region()
* won't see a conflict
*/
-
-#ifdef CONFIG_IA64_SGI_SN_SIM
- if (! IS_RUNNING_ON_SIMULATOR()) {
- ioport_resource.start = 0xc000000000000000;
- ioport_resource.end = 0xcfffffffffffffff;
- }
-#endif
+ ioport_resource.start = 0xc000000000000000;
+ ioport_resource.end = 0xcfffffffffffffff;
/*
* Set the root start and end for Mem Resource.
diff -Nru a/arch/ia64/sn/io/machvec/pci_dma.c b/arch/ia64/sn/io/machvec/pci_dma.c
--- a/arch/ia64/sn/io/machvec/pci_dma.c Tue Apr 13 17:34:23 2004
+++ b/arch/ia64/sn/io/machvec/pci_dma.c Tue Apr 13 17:34:23 2004
@@ -391,11 +391,9 @@
dma_map = pcibr_dmamap_alloc(vhdl, NULL, size, PCIIO_DMA_DATA |
MINIMAL_ATE_FLAG(phys_addr, size));
- if (!dma_map) {
- printk(KERN_ERR "pci_map_single: Unable to allocate anymore "
- "32 bit page map entries.\n");
+ /* PMU out of entries */
+ if (!dma_map)
return 0;
- }
dma_addr = (dma_addr_t) pcibr_dmamap_addr(dma_map, phys_addr, size);
dma_map->bd_dma_addr = dma_addr;
@@ -655,6 +653,12 @@
int
sn_dma_mapping_error(dma_addr_t dma_addr)
{
+ /*
+ * We can only run out of page mapping entries, so if there's
+ * an error, tell the caller to try again later.
+ */
+ if (!dma_addr)
+ return -EAGAIN;
return 0;
}
diff -Nru a/arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c b/arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c
--- a/arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c Tue Apr 13 17:34:23 2004
+++ b/arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c Tue Apr 13 17:34:23 2004
@@ -47,6 +47,7 @@
cfg_p pcibr_find_capability(cfg_p, unsigned);
extern uint64_t do_pcibr_config_get(cfg_p, unsigned, unsigned);
void do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
+int pcibr_slot_pwr(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot, int up, char *err_msg);
/*
@@ -351,7 +352,7 @@
goto enable_unlock;
}
- error = pcibr_slot_attach(pcibr_vhdl, slot, NULL,
+ error = pcibr_slot_attach(pcibr_vhdl, slot, 0,
req_p->req_resp.resp_l1_msg,
&req_p->req_resp.resp_sub_errno);
diff -Nru a/drivers/char/random.c b/drivers/char/random.c
--- a/drivers/char/random.c Tue Apr 13 17:34:23 2004
+++ b/drivers/char/random.c Tue Apr 13 17:34:23 2004
@@ -490,12 +490,15 @@
**********************************************************************/
struct entropy_store {
+ /* mostly-read data: */
+ struct poolinfo poolinfo;
+ __u32 *pool;
+
+ /* read-write data: */
+ spinlock_t lock ____cacheline_aligned;
unsigned add_ptr;
int entropy_count;
int input_rotate;
- struct poolinfo poolinfo;
- __u32 *pool;
- spinlock_t lock;
};
/*
@@ -571,37 +574,59 @@
static __u32 const twist_table[8] = {
0, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
- unsigned i;
- int new_rotate;
+ unsigned long i, add_ptr, tap1, tap2, tap3, tap4, tap5;
+ int new_rotate, input_rotate;
int wordmask = r->poolinfo.poolwords - 1;
- __u32 w;
+ __u32 w, next_w;
unsigned long flags;
+ char *cp;
+
+ /* Taps are constant, so we can load them without holding r->lock. */
+ tap1 = r->poolinfo.tap1;
+ tap2 = r->poolinfo.tap2;
+ tap3 = r->poolinfo.tap3;
+ tap4 = r->poolinfo.tap4;
+ tap5 = r->poolinfo.tap5;
+ next_w = *in++;
spin_lock_irqsave(&r->lock, flags);
+#ifdef ARCH_HAS_PREFETCH
+ for (cp = (char *) r->pool; cp <= (char *) (r->pool + wordmask); cp += PREFETCH_STRIDE)
+ prefetch(cp);
+#endif
+
+ input_rotate = r->input_rotate;
+ add_ptr = r->add_ptr;
+
while (nwords--) {
- w = rotate_left(r->input_rotate, *in++);
- i = r->add_ptr = (r->add_ptr - 1) & wordmask;
+ w = rotate_left(input_rotate, next_w);
+ if (nwords > 0)
+ next_w = *in++;
+ i = add_ptr = (add_ptr - 1) & wordmask;
/*
* Normally, we add 7 bits of rotation to the pool.
* At the beginning of the pool, add an extra 7 bits
* rotation, so that successive passes spread the
* input bits across the pool evenly.
*/
- new_rotate = r->input_rotate + 14;
+ new_rotate = input_rotate + 14;
if (i)
- new_rotate = r->input_rotate + 7;
- r->input_rotate = new_rotate & 31;
+ new_rotate = input_rotate + 7;
+ input_rotate = new_rotate & 31;
/* XOR in the various taps */
- w ^= r->pool[(i + r->poolinfo.tap1) & wordmask];
- w ^= r->pool[(i + r->poolinfo.tap2) & wordmask];
- w ^= r->pool[(i + r->poolinfo.tap3) & wordmask];
- w ^= r->pool[(i + r->poolinfo.tap4) & wordmask];
- w ^= r->pool[(i + r->poolinfo.tap5) & wordmask];
+ w ^= r->pool[(i + tap1) & wordmask];
+ w ^= r->pool[(i + tap2) & wordmask];
+ w ^= r->pool[(i + tap3) & wordmask];
+ w ^= r->pool[(i + tap4) & wordmask];
+ w ^= r->pool[(i + tap5) & wordmask];
w ^= r->pool[i];
r->pool[i] = (w >> 3) ^ twist_table[w & 7];
}
+
+ r->input_rotate = input_rotate;
+ r->add_ptr = add_ptr;
spin_unlock_irqrestore(&r->lock, flags);
}
diff -Nru a/drivers/char/sn_serial.c b/drivers/char/sn_serial.c
--- a/drivers/char/sn_serial.c Tue Apr 13 17:34:24 2004
+++ b/drivers/char/sn_serial.c Tue Apr 13 17:34:24 2004
@@ -82,10 +82,10 @@
static unsigned long sn_interrupt_timeout;
extern u64 master_node_bedrock_address;
-static int sn_debug_printf(const char *fmt, ...);
#undef DEBUG
#ifdef DEBUG
+static int sn_debug_printf(const char *fmt, ...);
#define DPRINTF(x...) sn_debug_printf(x)
#else
#define DPRINTF(x...) do { } while (0)
@@ -247,6 +247,7 @@
sn_func->sal_puts(s, count);
}
+#ifdef DEBUG
/* this is as "close to the metal" as we can get, used when the driver
* itself may be broken */
static int
@@ -262,6 +263,7 @@
va_end(args);
return printed_len;
}
+#endif /* DEBUG */
/*
* Interrupt handling routines.
diff -Nru a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
--- a/drivers/media/radio/Makefile Tue Apr 13 17:34:23 2004
+++ b/drivers/media/radio/Makefile Tue Apr 13 17:34:23 2004
@@ -2,6 +2,8 @@
# Makefile for the kernel character device drivers.
#
+obj-y := dummy.o
+
miropcm20-objs := miropcm20-rds-core.o miropcm20-radio.o
obj-$(CONFIG_RADIO_AZTECH) += radio-aztech.o
diff -Nru a/drivers/media/radio/dummy.c b/drivers/media/radio/dummy.c
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/drivers/media/radio/dummy.c Tue Apr 13 17:34:24 2004
@@ -0,0 +1 @@
+/* just so the linker knows what kind of object files it's dealing with... */
diff -Nru a/drivers/media/video/Makefile b/drivers/media/video/Makefile
--- a/drivers/media/video/Makefile Tue Apr 13 17:34:23 2004
+++ b/drivers/media/video/Makefile Tue Apr 13 17:34:23 2004
@@ -7,6 +7,7 @@
zoran-objs := zr36120.o zr36120_i2c.o zr36120_mem.o
zr36067-objs := zoran_procfs.o zoran_device.o \
zoran_driver.o zoran_card.o
+obj-y := dummy.o
obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o v4l1-compat.o
diff -Nru a/drivers/media/video/dummy.c b/drivers/media/video/dummy.c
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/drivers/media/video/dummy.c Tue Apr 13 17:34:24 2004
@@ -0,0 +1 @@
+/* just so the linker knows what kind of object files it's dealing with... */
diff -Nru a/drivers/serial/8250_hcdp.c b/drivers/serial/8250_hcdp.c
--- a/drivers/serial/8250_hcdp.c Tue Apr 13 17:34:24 2004
+++ b/drivers/serial/8250_hcdp.c Tue Apr 13 17:34:24 2004
@@ -186,6 +186,8 @@
port.irq = gsi;
#endif
port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_RESOURCES;
+ if (gsi)
+ port.flags |= UPF_AUTO_IRQ;
/*
* Note: the above memset() initializes port.line to 0,
diff -Nru a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
--- a/drivers/usb/serial/ipaq.c Tue Apr 13 17:34:24 2004
+++ b/drivers/usb/serial/ipaq.c Tue Apr 13 17:34:24 2004
@@ -222,7 +222,7 @@
* discipline instead of queueing.
*/
- port->tty->low_latency = 1;
+ port->tty->low_latency = 0;
port->tty->raw = 1;
port->tty->real_raw = 1;
diff -Nru a/include/asm-ia64/machvec_hpzx1.h b/include/asm-ia64/machvec_hpzx1.h
--- a/include/asm-ia64/machvec_hpzx1.h Tue Apr 13 17:34:23 2004
+++ b/include/asm-ia64/machvec_hpzx1.h Tue Apr 13 17:34:23 2004
@@ -2,6 +2,7 @@
#define _ASM_IA64_MACHVEC_HPZX1_h
extern ia64_mv_setup_t dig_setup;
+extern ia64_mv_setup_t sba_setup;
extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
extern ia64_mv_dma_free_coherent sba_free_coherent;
extern ia64_mv_dma_map_single sba_map_single;
@@ -19,7 +20,7 @@
* the macros are used directly.
*/
#define platform_name "hpzx1"
-#define platform_setup dig_setup
+#define platform_setup sba_setup
#define platform_dma_init machvec_noop
#define platform_dma_alloc_coherent sba_alloc_coherent
#define platform_dma_free_coherent sba_free_coherent
diff -Nru a/include/asm-ia64/perfmon.h b/include/asm-ia64/perfmon.h
--- a/include/asm-ia64/perfmon.h Tue Apr 13 17:34:23 2004
+++ b/include/asm-ia64/perfmon.h Tue Apr 13 17:34:23 2004
@@ -242,9 +242,10 @@
/*
* perfmon interface exported to modules
*/
-extern long pfm_mod_fast_read_pmds(struct task_struct *, unsigned long mask[4], unsigned long *addr, struct pt_regs *regs);
-extern long pfm_mod_read_pmds(struct task_struct *, pfarg_reg_t *req, unsigned int nreq, struct pt_regs *regs);
-extern long pfm_mod_write_pmcs(struct task_struct *, pfarg_reg_t *req, unsigned int nreq, struct pt_regs *regs);
+extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
+extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
+extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
+extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
/*
* describe the content of the local_cpu_date->pfm_syst_info field
diff -Nru a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
--- a/include/asm-ia64/processor.h Tue Apr 13 17:34:23 2004
+++ b/include/asm-ia64/processor.h Tue Apr 13 17:34:23 2004
@@ -137,14 +137,6 @@
* state comes earlier:
*/
struct cpuinfo_ia64 {
- /* irq_stat must be 64-bit aligned */
- union {
- struct {
- __u32 irq_count;
- __u32 bh_count;
- } f;
- __u64 irq_and_bh_counts;
- } irq_stat;
__u32 softirq_pending;
__u64 itm_delta; /* # of clock cycles between clock ticks */
__u64 itm_next; /* interval timer mask value to use for next clock tick */
diff -Nru a/kernel/printk.c b/kernel/printk.c
--- a/kernel/printk.c Tue Apr 13 17:34:24 2004
+++ b/kernel/printk.c Tue Apr 13 17:34:24 2004
@@ -402,6 +402,12 @@
__call_console_drivers(start, end);
}
}
+#ifdef CONFIG_IA64_EARLY_PRINTK
+ if (!console_drivers) {
+ void early_printk (const char *str, size_t len);
+ early_printk(&LOG_BUF(start), end - start);
+ }
+#endif
}
/*
@@ -730,7 +736,11 @@
* for us.
*/
spin_lock_irqsave(&logbuf_lock, flags);
+#ifdef CONFIG_IA64_EARLY_PRINTK
+ con_start = log_end;
+#else
con_start = log_start;
+#endif
spin_unlock_irqrestore(&logbuf_lock, flags);
}
release_console_sem();
@@ -832,3 +842,117 @@
printk_ratelimit_burst);
}
EXPORT_SYMBOL(printk_ratelimit);
+
+#ifdef CONFIG_IA64_EARLY_PRINTK
+
+#include <asm/io.h>
+
+# ifdef CONFIG_IA64_EARLY_PRINTK_VGA
+
+
+#define VGABASE ((char *)0xc0000000000b8000)
+#define VGALINES 24
+#define VGACOLS 80
+
+static int current_ypos = VGALINES, current_xpos = 0;
+
+static void
+early_printk_vga (const char *str, size_t len)
+{
+ char c;
+ int i, k, j;
+
+ while (len-- > 0) {
+ c = *str++;
+ if (current_ypos >= VGALINES) {
+ /* scroll 1 line up */
+ for (k = 1, j = 0; k < VGALINES; k++, j++) {
+ for (i = 0; i < VGACOLS; i++) {
+ writew(readw(VGABASE + 2*(VGACOLS*k + i)),
+ VGABASE + 2*(VGACOLS*j + i));
+ }
+ }
+ for (i = 0; i < VGACOLS; i++) {
+ writew(0x720, VGABASE + 2*(VGACOLS*j + i));
+ }
+ current_ypos = VGALINES-1;
+ }
+ if (c == '\n') {
+ current_xpos = 0;
+ current_ypos++;
+ } else if (c != '\r') {
+ writew(((0x7 << 8) | (unsigned short) c),
+ VGABASE + 2*(VGACOLS*current_ypos + current_xpos++));
+ if (current_xpos >= VGACOLS) {
+ current_xpos = 0;
+ current_ypos++;
+ }
+ }
+ }
+}
+
+# endif /* CONFIG_IA64_EARLY_PRINTK_VGA */
+
+# ifdef CONFIG_IA64_EARLY_PRINTK_UART
+
+#include <linux/serial_reg.h>
+#include <asm/processor.h>
+
+static void early_printk_uart(const char *str, size_t len)
+{
+ static char *uart = NULL;
+ unsigned long uart_base;
+ char c;
+
+ if (!uart) {
+ uart_base = 0;
+# ifdef CONFIG_SERIAL_8250_HCDP
+ {
+ extern unsigned long hcdp_early_uart(void);
+ uart_base = hcdp_early_uart();
+ }
+# endif
+# if CONFIG_IA64_EARLY_PRINTK_UART_BASE
+ if (!uart_base)
+ uart_base = CONFIG_IA64_EARLY_PRINTK_UART_BASE;
+# endif
+ if (!uart_base)
+ return;
+
+ uart = ioremap(uart_base, 64);
+ if (!uart)
+ return;
+ }
+
+ while (len-- > 0) {
+ c = *str++;
+ while ((readb(uart + UART_LSR) & UART_LSR_TEMT) == 0)
+ cpu_relax(); /* spin */
+
+ writeb(c, uart + UART_TX);
+
+ if (c == '\n')
+ writeb('\r', uart + UART_TX);
+ }
+}
+
+# endif /* CONFIG_IA64_EARLY_PRINTK_UART */
+
+#ifdef CONFIG_IA64_EARLY_PRINTK_SGI_SN
+extern int early_printk_sn_sal(const char *str, int len);
+#endif
+
+void early_printk(const char *str, size_t len)
+{
+#ifdef CONFIG_IA64_EARLY_PRINTK_UART
+ early_printk_uart(str, len);
+#endif
+#ifdef CONFIG_IA64_EARLY_PRINTK_VGA
+ early_printk_vga(str, len);
+#endif
+#ifdef CONFIG_IA64_EARLY_PRINTK_SGI_SN
+ early_printk_sn_sal(str, len);
+#endif
+}
+
+#endif /* CONFIG_IA64_EARLY_PRINTK */