diff -Nru a/Documentation/ia64/fsys.txt b/Documentation/ia64/fsys.txt --- a/Documentation/ia64/fsys.txt Wed Feb 12 14:47:58 2003 +++ b/Documentation/ia64/fsys.txt Wed Feb 12 14:47:58 2003 @@ -4,7 +4,7 @@ ----------------------------------- Started: 13-Jan-2003 - Last update: 24-Jan-2003 + Last update: 11-Feb-2003 David Mosberger-Tang @@ -42,9 +42,9 @@ can disable interrupts and avoid all other interruption-sources to avoid preemption) - - neither the memory nor the register stack can be trusted while + - neither the memory-stack nor the register-stack can be trusted while in fsys-mode (they point to the user-level stacks, which may - be invalid) + be invalid, or completely bogus addresses) In summary, fsys-mode is much more similar to running in user-mode than it is to running in kernel-mode. Of course, given that the diff -Nru a/Documentation/mmio_barrier.txt b/Documentation/mmio_barrier.txt --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/Documentation/mmio_barrier.txt Wed Feb 12 14:47:58 2003 @@ -0,0 +1,15 @@ +On some platforms, so-called memory-mapped I/O is weakly ordered. For +example, the following might occur: + +CPU A writes 0x1 to Device #1 +CPU B writes 0x2 to Device #1 +Device #1 sees 0x2 +Device #1 sees 0x1 + +On such platforms, driver writers are responsible for ensuring that I/O +writes to memory-mapped addresses on their device arrive in the order +intended. The mmiob() macro is provided for this purpose. A typical use +of this macro might be immediately prior to the exit of a critical +section of code proteced by spinlocks. This would ensure that subsequent +writes to I/O space arrived only after all prior writes (much like a +typical memory barrier op, mb(), only with respect to I/O). diff -Nru a/Makefile b/Makefile --- a/Makefile Wed Feb 12 14:47:57 2003 +++ b/Makefile Wed Feb 12 14:47:57 2003 @@ -172,7 +172,7 @@ NOSTDINC_FLAGS = -nostdinc -iwithprefix include CPPFLAGS := -D__KERNEL__ -Iinclude -CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -Wno-trigraphs -O2 \ +CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -Wno-trigraphs -g -O2 \ -fno-strict-aliasing -fno-common AFLAGS := -D__ASSEMBLY__ $(CPPFLAGS) diff -Nru a/arch/ia64/Makefile b/arch/ia64/Makefile --- a/arch/ia64/Makefile Wed Feb 12 14:47:57 2003 +++ b/arch/ia64/Makefile Wed Feb 12 14:47:57 2003 @@ -63,7 +63,7 @@ .PHONY: boot compressed include/asm-ia64/offsets.h -all: vmlinux +all: prepare vmlinux compressed: vmlinux.gz diff -Nru a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c --- a/arch/ia64/hp/sim/simscsi.c Wed Feb 12 14:47:57 2003 +++ b/arch/ia64/hp/sim/simscsi.c Wed Feb 12 14:47:57 2003 @@ -156,7 +156,7 @@ if (sc->request_bufflen < req.len) return; - stat.fd = desc[sc->target]; + stat.fd = desc[sc->device->id]; if (DBG) printk("simscsi_%s @ %lx (off %lx)\n", mode == SSC_READ ? 
"read":"write", req.addr, offset); @@ -178,7 +178,7 @@ struct disk_stat stat; struct disk_req req; - stat.fd = desc[sc->target]; + stat.fd = desc[sc->device->id]; while (list_len) { req.addr = __pa(page_address(sl->page) + sl->offset); @@ -259,6 +259,7 @@ int simscsi_queuecommand (Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *)) { + unsigned int target_id = sc->device->id; char fname[MAX_ROOT_LEN+16]; size_t disk_size; char *buf; @@ -267,21 +268,21 @@ if (DBG) printk("simscsi_queuecommand: target=%d,cmnd=%u,sc=%lu,sp=%lx,done=%p\n", - sc->target, sc->cmnd[0], sc->serial_number, sp, done); + target_id, sc->cmnd[0], sc->serial_number, sp, done); #endif sc->result = DID_BAD_TARGET << 16; sc->scsi_done = done; - if (sc->target <= 15 && sc->lun == 0) { + if (target_id <= 15 && sc->device->lun == 0) { switch (sc->cmnd[0]) { case INQUIRY: if (sc->request_bufflen < 35) { break; } - sprintf (fname, "%s%c", simscsi_root, 'a' + sc->target); - desc[sc->target] = ia64_ssc(__pa(fname), SSC_READ_ACCESS|SSC_WRITE_ACCESS, - 0, 0, SSC_OPEN); - if (desc[sc->target] < 0) { + sprintf (fname, "%s%c", simscsi_root, 'a' + target_id); + desc[target_id] = ia64_ssc(__pa(fname), SSC_READ_ACCESS|SSC_WRITE_ACCESS, + 0, 0, SSC_OPEN); + if (desc[target_id] < 0) { /* disk doesn't exist... */ break; } @@ -303,37 +304,37 @@ break; case READ_6: - if (desc[sc->target] < 0 ) + if (desc[target_id] < 0 ) break; simscsi_readwrite6(sc, SSC_READ); break; case READ_10: - if (desc[sc->target] < 0 ) + if (desc[target_id] < 0 ) break; simscsi_readwrite10(sc, SSC_READ); break; case WRITE_6: - if (desc[sc->target] < 0) + if (desc[target_id] < 0) break; simscsi_readwrite6(sc, SSC_WRITE); break; case WRITE_10: - if (desc[sc->target] < 0) + if (desc[target_id] < 0) break; simscsi_readwrite10(sc, SSC_WRITE); break; case READ_CAPACITY: - if (desc[sc->target] < 0 || sc->request_bufflen < 8) { + if (desc[target_id] < 0 || sc->request_bufflen < 8) { break; } buf = sc->request_buffer; - disk_size = simscsi_get_disk_size(desc[sc->target]); + disk_size = simscsi_get_disk_size(desc[target_id]); /* pretend to be a 1GB disk (partition table contains real stuff): */ buf[0] = (disk_size >> 24) & 0xff; diff -Nru a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c --- a/arch/ia64/kernel/acpi.c Wed Feb 12 14:47:58 2003 +++ b/arch/ia64/kernel/acpi.c Wed Feb 12 14:47:58 2003 @@ -741,7 +741,7 @@ int __init -acpi_boot_init (char *cmdline) +acpi_boot_init (void) { /* diff -Nru a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S --- a/arch/ia64/kernel/entry.S Wed Feb 12 14:47:57 2003 +++ b/arch/ia64/kernel/entry.S Wed Feb 12 14:47:57 2003 @@ -701,7 +701,7 @@ * NOTE: alloc, loadrs, and cover can't be predicated. 
*/ (pNonSys) br.cond.dpnt dont_preserve_current_frame - cover // add current frame into dirty partition + cover // add current frame into dirty partition and set cr.ifs ;; mov r19=ar.bsp // get new backing store pointer sub r16=r16,r18 // krbs = old bsp - size of dirty partition @@ -727,7 +727,7 @@ # define Nregs 14 #endif alloc loc0=ar.pfs,2,Nregs-2,2,0 - shr.u loc1=r18,9 // RNaTslots <= dirtySize / (64*8) + 1 + shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8)) sub r17=r17,r18 // r17 = (physStackedSize + 8) - dirtySize ;; mov ar.rsc=r19 // load ar.rsc to be used for "loadrs" @@ -774,13 +774,13 @@ ;; mov loc3=0 mov loc4=0 - mov loc9=0 mov loc5=0 mov loc6=0 + mov loc7=0 (pRecurse) br.call.sptk.many b6=rse_clear_invalid ;; - mov loc7=0 mov loc8=0 + mov loc9=0 cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret mov loc10=0 mov loc11=0 @@ -904,13 +904,14 @@ mov r9=ar.unat mov loc0=rp // save return address mov out0=0 // there is no "oldset" - adds out1=0,sp // out1=&sigscratch + adds out1=8,sp // out1=&sigscratch->ar_pfs (pSys) mov out2=1 // out2==1 => we're in a syscall ;; (pNonSys) mov out2=0 // out2==0 => not a syscall .fframe 16 .spillpsp ar.unat, 16 // (note that offset is relative to psp+0x10!) st8 [sp]=r9,-16 // allocate space for ar.unat and save it + st8 [out1]=loc1,-8 // save ar.pfs, out1=&sigscratch .body br.call.sptk.many rp=do_notify_resume_user .ret15: .restore sp @@ -931,11 +932,12 @@ mov loc0=rp // save return address mov out0=in0 // mask mov out1=in1 // sigsetsize - adds out2=0,sp // out2=&sigscratch + adds out2=8,sp // out2=&sigscratch->ar_pfs ;; .fframe 16 .spillpsp ar.unat, 16 // (note that offset is relative to psp+0x10!) st8 [sp]=r9,-16 // allocate space for ar.unat and save it + st8 [out2]=loc1,-8 // save ar.pfs, out2=&sigscratch .body br.call.sptk.many rp=ia64_rt_sigsuspend .ret17: .restore sp @@ -1242,7 +1244,7 @@ data8 sys_sched_setaffinity data8 sys_sched_getaffinity data8 sys_set_tid_address - data8 ia64_ni_syscall + data8 sys_fadvise64 data8 ia64_ni_syscall // 1235 data8 sys_exit_group data8 sys_lookup_dcookie diff -Nru a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S --- a/arch/ia64/kernel/gate.S Wed Feb 12 14:47:58 2003 +++ b/arch/ia64/kernel/gate.S Wed Feb 12 14:47:58 2003 @@ -145,11 +145,12 @@ */ #define SIGTRAMP_SAVES \ - .unwabi @svr4, 's' // mark this as a sigtramp handler (saves scratch regs) \ - .savesp ar.unat, UNAT_OFF+SIGCONTEXT_OFF \ - .savesp ar.fpsr, FPSR_OFF+SIGCONTEXT_OFF \ - .savesp pr, PR_OFF+SIGCONTEXT_OFF \ - .savesp rp, RP_OFF+SIGCONTEXT_OFF \ + .unwabi @svr4, 's'; /* mark this as a sigtramp handler (saves scratch regs) */ \ + .savesp ar.unat, UNAT_OFF+SIGCONTEXT_OFF; \ + .savesp ar.fpsr, FPSR_OFF+SIGCONTEXT_OFF; \ + .savesp pr, PR_OFF+SIGCONTEXT_OFF; \ + .savesp rp, RP_OFF+SIGCONTEXT_OFF; \ + .savesp ar.pfs, CFM_OFF+SIGCONTEXT_OFF; \ .vframesp SP_OFF+SIGCONTEXT_OFF GLOBAL_ENTRY(ia64_sigtramp) @@ -173,9 +174,7 @@ .spillsp.p p8, ar.rnat, RNAT_OFF+SIGCONTEXT_OFF (p8) br.cond.spnt setup_rbs // yup -> (clobbers r14, r15, and r16) back_from_setup_rbs: - - .spillreg ar.pfs, r8 - alloc r8=ar.pfs,0,0,3,0 // get CFM0, EC0, and CPL0 into r8 + alloc r8=ar.pfs,0,0,3,0 ld8 out0=[base0],16 // load arg0 (signum) adds base1=(ARG1_OFF-(RBS_BASE_OFF+SIGCONTEXT_OFF)),base1 ;; @@ -184,17 +183,12 @@ ;; ld8 out2=[base0] // load arg2 (sigcontextp) ld8 gp=[r17] // get signal handler's global pointer - adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp ;; .spillsp ar.bsp, BSP_OFF+SIGCONTEXT_OFF - st8 [base0]=r9,(CFM_OFF-BSP_OFF) // save 
sc_ar_bsp - dep r8=0,r8,38,26 // clear EC0, CPL0 and reserved bits - adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp - ;; - .spillsp ar.pfs, CFM_OFF+SIGCONTEXT_OFF - st8 [base0]=r8 // save CFM0 + st8 [base0]=r9 // save sc_ar_bsp adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp + adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp ;; stf.spill [base0]=f6,32 stf.spill [base1]=f7,32 @@ -217,7 +211,6 @@ ld8 r15=[base0],(CFM_OFF-BSP_OFF) // fetch sc_ar_bsp and advance to CFM_OFF mov r14=ar.bsp ;; - ld8 r8=[base0] // restore (perhaps modified) CFM0, EC0, and CPL0 cmp.ne p8,p0=r14,r15 // do we need to restore the rbs? (p8) br.cond.spnt restore_rbs // yup -> (clobbers r14-r18, f6 & f7) ;; diff -Nru a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c --- a/arch/ia64/kernel/ia64_ksyms.c Wed Feb 12 14:47:57 2003 +++ b/arch/ia64/kernel/ia64_ksyms.c Wed Feb 12 14:47:57 2003 @@ -56,6 +56,12 @@ #include EXPORT_SYMBOL(clear_page); +#ifdef CONFIG_VIRTUAL_MEM_MAP +#include +EXPORT_SYMBOL(vmalloc_end); +EXPORT_SYMBOL(ia64_pfn_valid); +#endif + #include # ifndef CONFIG_NUMA EXPORT_SYMBOL(cpu_info__per_cpu); diff -Nru a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c --- a/arch/ia64/kernel/init_task.c Wed Feb 12 14:47:58 2003 +++ b/arch/ia64/kernel/init_task.c Wed Feb 12 14:47:58 2003 @@ -17,6 +17,7 @@ static struct fs_struct init_fs = INIT_FS; static struct files_struct init_files = INIT_FILES; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); +static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); /* diff -Nru a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c --- a/arch/ia64/kernel/perfmon.c Wed Feb 12 14:47:58 2003 +++ b/arch/ia64/kernel/perfmon.c Wed Feb 12 14:47:58 2003 @@ -8,7 +8,7 @@ * Modifications by Stephane Eranian, Hewlett-Packard Co. * Modifications by David Mosberger-Tang, Hewlett-Packard Co. * - * Copyright (C) 1999-2002 Hewlett Packard Co + * Copyright (C) 1999-2003 Hewlett Packard Co * Stephane Eranian * David Mosberger-Tang */ @@ -230,9 +230,15 @@ unsigned int protected:1; /* allow access to creator of context only */ unsigned int using_dbreg:1; /* using range restrictions (debug registers) */ unsigned int excl_idle:1; /* exclude idle task in system wide session */ - unsigned int reserved:23; + unsigned int trap_reason:2; /* reason for going into pfm_block_ovfl_reset() */ + unsigned int reserved:21; } pfm_context_flags_t; +#define PFM_TRAP_REASON_NONE 0x0 /* default value */ +#define PFM_TRAP_REASON_BLOCKSIG 0x1 /* we need to block on overflow and signal user */ +#define PFM_TRAP_REASON_SIG 0x2 /* we simply need to signal user */ +#define PFM_TRAP_REASON_RESET 0x3 /* we need to reset PMDs */ + /* * perfmon context: encapsulates all the state of a monitoring session * XXX: probably need to change layout @@ -277,6 +283,7 @@ #define ctx_fl_protected ctx_flags.protected #define ctx_fl_using_dbreg ctx_flags.using_dbreg #define ctx_fl_excl_idle ctx_flags.excl_idle +#define ctx_fl_trap_reason ctx_flags.trap_reason /* * global information about all sessions @@ -1225,6 +1232,8 @@ ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0; ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0; ctx->ctx_fl_frozen = 0; + ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE; + /* * setting this flag to 0 here means, that the creator or the task that the * context is being attached are granted access. 
Given that a context can only @@ -1505,7 +1514,7 @@ unsigned long value, hw_value; unsigned int cnum; int i; - int ret = 0; + int ret = -EINVAL; /* we don't quite support this right now */ if (task != current) return -EINVAL; @@ -1517,7 +1526,6 @@ /* XXX: ctx locking may be required here */ - ret = -EINVAL; for (i = 0; i < count; i++, req++) { @@ -2547,6 +2555,10 @@ task = find_task_by_pid(pid); + if (task) get_task_struct(task); + + read_unlock(&tasklist_lock); + if (!task) goto abort_call; ret = -EPERM; @@ -2584,16 +2596,116 @@ ret = (*pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_func)(task, ctx, arg, count, regs); abort_call: - if (task != current) read_unlock(&tasklist_lock); + if (task && task != current) put_task_struct(task); return ret; } +/* + * send SIGPROF to register task, must be invoked when it + * is safe to send a signal, e.g., not holding any runqueue + * related locks. + */ +static int +pfm_notify_user(pfm_context_t *ctx) +{ + struct siginfo si; + int ret; + + if (ctx->ctx_notify_task == NULL) { + DBprintk(("[%d] no notifier\n", current->pid)); + return -EINVAL; + } + + si.si_errno = 0; + si.si_addr = NULL; + si.si_pid = current->pid; /* who is sending */ + si.si_signo = SIGPROF; + si.si_code = PROF_OVFL; + + si.si_pfm_ovfl[0] = ctx->ctx_ovfl_regs[0]; + + /* + * when the target of the signal is not ourself, we have to be more + * careful. The notify_task may being cleared by the target task itself + * in release_thread(). We must ensure mutual exclusion here such that + * the signal is delivered (even to a dying task) safely. + */ + + if (ctx->ctx_notify_task != current) { + /* + * grab the notification lock for this task + * This guarantees that the sequence: test + send_signal + * is atomic with regards to the ctx_notify_task field. + * + * We need a spinlock and not just an atomic variable for this. + * + */ + spin_lock(&ctx->ctx_lock); + + /* + * now notify_task cannot be modified until we're done + * if NULL, they it got modified while we were in the handler + */ + if (ctx->ctx_notify_task == NULL) { + + spin_unlock(&ctx->ctx_lock); + + /* + * If we've lost the notified task, then we will run + * to completion wbut keep the PMU frozen. Results + * will be incorrect anyway. We do not kill task + * to leave it possible to attach perfmon context + * to already running task. + */ + printk("perfmon: pfm_notify_user() lost notify_task\n"); + DBprintk_ovfl(("notification task has disappeared !\n")); + + /* we cannot afford to block now */ + ctx->ctx_fl_block = 0; + + return -EINVAL; + } + + /* + * required by send_sig_info() to make sure the target + * task does not disappear on us. + */ + read_lock(&tasklist_lock); + } + /* + * in this case, we don't stop the task, we let it go on. It will + * necessarily go to the signal handler (if any) when it goes back to + * user mode. 
+ */ + DBprintk_ovfl(("[%d] sending notification to [%d]\n", + current->pid, ctx->ctx_notify_task->pid)); + + /* + * this call is safe in an interrupt handler, as is read_lock() on tasklist_lock + */ + ret = send_sig_info(SIGPROF, &si, ctx->ctx_notify_task); + if (ret) { + printk("perfmon: send_sig_info(process %d, SIGPROF)=%d\n", + ctx->ctx_notify_task->pid, ret); + } + + /* + * now undo the protections in order + */ + if (ctx->ctx_notify_task != current) { + read_unlock(&tasklist_lock); + spin_unlock(&ctx->ctx_lock); + } + return ret; +} + void pfm_ovfl_block_reset(void) { struct thread_struct *th = &current->thread; pfm_context_t *ctx = current->thread.pfm_context; + unsigned int reason; int ret; /* @@ -2609,8 +2721,31 @@ printk(KERN_DEBUG "perfmon: [%d] has no PFM context\n", current->pid); return; } + /* + * extract reason for being here and clear + */ + reason = ctx->ctx_fl_trap_reason; + ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE; + + DBprintk(("[%d] reason=%d\n", current->pid, reason)); + + /* + * just here for a reset (non-blocking context only) + */ + if (reason == PFM_TRAP_REASON_RESET) goto non_blocking; - if (CTX_OVFL_NOBLOCK(ctx)) goto non_blocking; + /* + * first notify user. This can fail if notify_task has disappeared. + */ + if (reason == PFM_TRAP_REASON_SIG || reason == PFM_TRAP_REASON_BLOCKSIG) { + ret = pfm_notify_user(ctx); + if (ret) return; + } + + /* + * came here just to signal (non-blocking) + */ + if (reason == PFM_TRAP_REASON_SIG) return; DBprintk(("[%d] before sleeping\n", current->pid)); @@ -2756,7 +2891,6 @@ unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL; int i; int ret = 1; - struct siginfo si; /* * It is never safe to access the task for which the overflow interrupt is destinated * using the current variable as the interrupt may occur in the middle of a context switch @@ -2869,7 +3003,7 @@ if (ovfl_notify == 0UL) { if (ovfl_pmds) pfm_reset_regs(ctx, &ovfl_pmds, PFM_PMD_SHORT_RESET); - return 0x0; + return 0x0UL; } /* @@ -2877,142 +3011,34 @@ */ ctx->ctx_ovfl_regs[0] = ovfl_pmds; - /* - * we have come to this point because there was an overflow and that notification - * was requested. The notify_task may have disappeared, in which case notify_task - * is NULL. - */ - if (ctx->ctx_notify_task) { - - si.si_errno = 0; - si.si_addr = NULL; - si.si_pid = task->pid; /* who is sending */ - - si.si_signo = SIGPROF; - si.si_code = PROF_OVFL; /* indicates a perfmon SIGPROF signal */ - /* - * Shift the bitvector such that the user sees bit 4 for PMD4 and so on. - * We only use smpl_ovfl[0] for now. It should be fine for quite a while - * until we have more than 61 PMD available. - */ - si.si_pfm_ovfl[0] = ovfl_notify; - - /* - * when the target of the signal is not ourself, we have to be more - * careful. The notify_task may being cleared by the target task itself - * in release_thread(). We must ensure mutual exclusion here such that - * the signal is delivered (even to a dying task) safely. - */ - - if (ctx->ctx_notify_task != current) { - /* - * grab the notification lock for this task - * This guarantees that the sequence: test + send_signal - * is atomic with regards to the ctx_notify_task field. - * - * We need a spinlock and not just an atomic variable for this.
- * - */ - spin_lock(&ctx->ctx_lock); - - /* - * now notify_task cannot be modified until we're done - * if NULL, they it got modified while we were in the handler - */ - if (ctx->ctx_notify_task == NULL) { - - spin_unlock(&ctx->ctx_lock); - - /* - * If we've lost the notified task, then we will run - * to completion wbut keep the PMU frozen. Results - * will be incorrect anyway. We do not kill task - * to leave it possible to attach perfmon context - * to already running task. - */ - goto lost_notify; - } - /* - * required by send_sig_info() to make sure the target - * task does not disappear on us. - */ - read_lock(&tasklist_lock); - } - /* - * in this case, we don't stop the task, we let it go on. It will - * necessarily go to the signal handler (if any) when it goes back to - * user mode. - */ - DBprintk_ovfl(("[%d] sending notification to [%d]\n", - task->pid, ctx->ctx_notify_task->pid)); - - - /* - * this call is safe in an interrupt handler, so does read_lock() on tasklist_lock - */ - ret = send_sig_info(SIGPROF, &si, ctx->ctx_notify_task); - if (ret != 0) - printk(KERN_DEBUG "send_sig_info(process %d, SIGPROF)=%d\n", - ctx->ctx_notify_task->pid, ret); - /* - * now undo the protections in order - */ - if (ctx->ctx_notify_task != current) { - read_unlock(&tasklist_lock); - spin_unlock(&ctx->ctx_lock); - } - - /* - * if we block set the pfm_must_block bit - * when in block mode, we can effectively block only when the notified - * task is not self, otherwise we would deadlock. - * in this configuration, the notification is sent, the task will not - * block on the way back to user mode, but the PMU will be kept frozen - * until PFM_RESTART. - * Note that here there is still a race condition with notify_task - * possibly being nullified behind our back, but this is fine because - * it can only be changed to NULL which by construction, can only be - * done when notify_task != current. So if it was already different - * before, changing it to NULL will still maintain this invariant. - * Of course, when it is equal to current it cannot change at this point. - */ - DBprintk_ovfl(("block=%d notify [%d] current [%d]\n", - ctx->ctx_fl_block, - ctx->ctx_notify_task ? ctx->ctx_notify_task->pid: -1, - current->pid )); + DBprintk_ovfl(("block=%d notify [%d] current [%d]\n", + ctx->ctx_fl_block, + ctx->ctx_notify_task ? ctx->ctx_notify_task->pid: -1, + current->pid )); - if (!CTX_OVFL_NOBLOCK(ctx) && ctx->ctx_notify_task != task) { - t->pfm_ovfl_block_reset = 1; /* will cause blocking */ - } + /* + * ctx_notify_task could already be NULL, checked in pfm_notify_user() + */ + if (CTX_OVFL_NOBLOCK(ctx) == 0 && ctx->ctx_notify_task != task) { + t->pfm_ovfl_block_reset = 1; /* will cause blocking */ + ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCKSIG; } else { -lost_notify: /* XXX: more to do here, to convert to non-blocking (reset values) */ - - DBprintk_ovfl(("notification task has disappeared !\n")); - /* - * for a non-blocking context, we make sure we do not fall into the - * pfm_overflow_notify() trap. Also in the case of a blocking context with lost - * notify process, then we do not want to block either (even though it is - * interruptible). In this case, the PMU will be kept frozen and the process will - * run to completion without monitoring enabled. - * - * Of course, we cannot loose notify process when self-monitoring. 
- */ - t->pfm_ovfl_block_reset = 0; - + t->pfm_ovfl_block_reset = 1; /* will cause blocking */ + ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_SIG; } + /* - * If notification was successful, then we rely on the pfm_restart() - * call to unfreeze and reset (in both blocking or non-blocking mode). - * - * If notification failed, then we will keep the PMU frozen and run - * the task to completion + * keep the PMU frozen until either pfm_restart() or + * task completes (non-blocking or notify_task gone). */ ctx->ctx_fl_frozen = 1; - DBprintk_ovfl(("return pmc0=0x%x must_block=%ld\n", - ctx->ctx_fl_frozen ? 0x1 : 0x0, t->pfm_ovfl_block_reset)); + DBprintk_ovfl(("return pmc0=0x%x must_block=%ld reason=%d\n", + ctx->ctx_fl_frozen ? 0x1 : 0x0, + t->pfm_ovfl_block_reset, + ctx->ctx_fl_trap_reason)); - return ctx->ctx_fl_frozen ? 0x1 : 0x0; + return 0x1UL; } static void @@ -3060,14 +3086,21 @@ * assume PMC[0].fr = 1 at this point */ pmc0 = pfm_overflow_handler(task, ctx, pmc0, regs); - /* - * We always clear the overflow status bits and either unfreeze - * or keep the PMU frozen. + * we can only update pmc0 when the overflow + * is for the current context. In UP the current + * task may not be the one owning the PMU */ - ia64_set_pmc(0, pmc0); - ia64_srlz_d(); - + if (task == current) { + /* + * We always clear the overflow status bits and either unfreeze + * or keep the PMU frozen. + */ + ia64_set_pmc(0, pmc0); + ia64_srlz_d(); + } else { + task->thread.pmc[0] = pmc0; + } } else { pfm_stats[smp_processor_id()].pfm_spurious_ovfl_intr_count++; } @@ -3373,16 +3406,20 @@ if (mask & 0x1) ia64_set_pmc(i, t->pmc[i]); } + /* + * manually invoke core interrupt handler + * if the task had a pending overflow when it was ctxsw out. + * Side effect on ctx_fl_frozen is possible. + */ if (t->pmc[0] & ~0x1) { - pfm_overflow_handler(task, ctx, t->pmc[0], NULL); + t->pmc[0] = pfm_overflow_handler(task, ctx, t->pmc[0], NULL); } /* - * fl_frozen==1 when we are in blocking mode waiting for restart + * unfreeze PMU if possible */ - if (ctx->ctx_fl_frozen == 0) { - pfm_unfreeze_pmu(); - } + if (ctx->ctx_fl_frozen == 0) pfm_unfreeze_pmu(); + atomic_set(&ctx->ctx_last_cpu, smp_processor_id()); SET_PMU_OWNER(task); @@ -3770,8 +3807,9 @@ } } - nctx->ctx_fl_frozen = 0; - nctx->ctx_ovfl_regs[0] = 0UL; + nctx->ctx_fl_frozen = 0; + nctx->ctx_ovfl_regs[0] = 0UL; + nctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE; atomic_set(&nctx->ctx_last_cpu, -1); /* diff -Nru a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c --- a/arch/ia64/kernel/ptrace.c Wed Feb 12 14:47:58 2003 +++ b/arch/ia64/kernel/ptrace.c Wed Feb 12 14:47:58 2003 @@ -1,7 +1,7 @@ /* * Kernel support for the ptrace() and syscall tracing interfaces. * - * Copyright (C) 1999-2002 Hewlett-Packard Co + * Copyright (C) 1999-2003 Hewlett-Packard Co * David Mosberger-Tang * * Derived from the x86 and Alpha versions. 
Most of the code in here @@ -1235,19 +1235,12 @@ ret = 0; goto out_tsk; - case PTRACE_GETSIGINFO: - ret = -EIO; - if (!access_ok(VERIFY_WRITE, data, sizeof (siginfo_t)) || !child->thread.siginfo) - goto out_tsk; - ret = copy_siginfo_to_user((siginfo_t *) data, child->thread.siginfo); + case PTRACE_OLD_GETSIGINFO: /* for backwards-compatibility */ + ret = ptrace_request(child, PTRACE_GETSIGINFO, addr, data); goto out_tsk; - case PTRACE_SETSIGINFO: - ret = -EIO; - if (!access_ok(VERIFY_READ, data, sizeof (siginfo_t)) - || child->thread.siginfo == 0) - goto out_tsk; - ret = copy_siginfo_from_user(child->thread.siginfo, (siginfo_t *) data); + case PTRACE_OLD_SETSIGINFO: /* for backwards-compatibility */ + ret = ptrace_request(child, PTRACE_SETSIGINFO, addr, data); goto out_tsk; case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ diff -Nru a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c --- a/arch/ia64/kernel/setup.c Wed Feb 12 14:47:57 2003 +++ b/arch/ia64/kernel/setup.c Wed Feb 12 14:47:57 2003 @@ -363,7 +363,7 @@ #ifdef CONFIG_ACPI_BOOT /* Initialize the ACPI boot-time table parser */ - acpi_table_init(*cmdline_p); + acpi_table_init(); # ifdef CONFIG_ACPI_NUMA acpi_numa_init(); # endif @@ -422,9 +422,9 @@ cpu_init(); /* initialize the bootstrap CPU */ #ifdef CONFIG_ACPI_BOOT - acpi_boot_init(*cmdline_p); + acpi_boot_init(); #endif -#ifdef CONFIG_SERIAL_HCDP +#ifdef CONFIG_SERIAL_8250_HCDP if (efi.hcdp) { void setup_serial_hcdp(void *); diff -Nru a/arch/ia64/kernel/sigframe.h b/arch/ia64/kernel/sigframe.h --- a/arch/ia64/kernel/sigframe.h Wed Feb 12 14:47:58 2003 +++ b/arch/ia64/kernel/sigframe.h Wed Feb 12 14:47:58 2003 @@ -1,6 +1,6 @@ struct sigscratch { unsigned long scratch_unat; /* ar.unat for the general registers saved in pt */ - unsigned long pad; + unsigned long ar_pfs; /* for syscalls, the user-level function-state */ struct pt_regs pt; }; diff -Nru a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c --- a/arch/ia64/kernel/signal.c Wed Feb 12 14:47:58 2003 +++ b/arch/ia64/kernel/signal.c Wed Feb 12 14:47:58 2003 @@ -315,7 +315,7 @@ static long setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct sigscratch *scr) { - unsigned long flags = 0, ifs, nat; + unsigned long flags = 0, ifs, cfm, nat; long err; ifs = scr->pt.cr_ifs; @@ -325,7 +325,9 @@ if ((ifs & (1UL << 63)) == 0) { /* if cr_ifs isn't valid, we got here through a syscall */ flags |= IA64_SC_FLAG_IN_SYSCALL; - } + cfm = scr->ar_pfs & ((1UL << 38) - 1); + } else + cfm = ifs & ((1UL << 38) - 1); ia64_flush_fph(current); if ((current->thread.flags & IA64_THREAD_FPH_VALID)) { flags |= IA64_SC_FLAG_FPH_VALID; @@ -344,6 +346,7 @@ err |= __put_user(nat, &sc->sc_nat); err |= PUT_SIGSET(mask, &sc->sc_mask); + err |= __put_user(cfm, &sc->sc_cfm); err |= __put_user(scr->pt.cr_ipsr & IA64_PSR_UM, &sc->sc_um); err |= __put_user(scr->pt.ar_rsc, &sc->sc_ar_rsc); err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); @@ -422,6 +425,15 @@ scr->pt.ar_fpsr = FPSR_DEFAULT; /* reset fpsr for signal handler */ scr->pt.cr_iip = tramp_addr; ia64_psr(&scr->pt)->ri = 0; /* start executing in first slot */ + /* + * Force the interruption function mask to zero. This has no effect when a + * system-call got interrupted by a signal (since, in that case, scr->pt_cr_ifs is + * ignored), but it has the desirable effect of making it possible to deliver a + * signal with an incomplete register frame (which happens when a mandatory RSE + * load faults). 
Furthermore, it has no negative effect on getting the user's + dirty partition preserved, because that's governed by scr->pt.loadrs. + */ + scr->pt.cr_ifs = (1UL << 63); /* * Note: this affects only the NaT bits of the scratch regs (the ones saved in @@ -522,7 +534,7 @@ if (signr <= 0) break; - ka = &current->sig->action[signr - 1]; + ka = &current->sighand->action[signr - 1]; if (restart) { switch (errno) { diff -Nru a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c --- a/arch/ia64/kernel/time.c Wed Feb 12 14:47:57 2003 +++ b/arch/ia64/kernel/time.c Wed Feb 12 14:47:57 2003 @@ -25,7 +25,7 @@ #include extern unsigned long wall_jiffies; -extern unsigned long last_time_offset; +extern unsigned long last_nsec_offset; u64 jiffies_64; @@ -74,13 +74,13 @@ - (lost + 1)*cpu_data(time_keeper_id)->itm_delta); now = ia64_get_itc(); - if ((long) (now - last_tick) < 0) { + if (unlikely((long) (now - last_tick) < 0)) { printk(KERN_ERR "CPU %d: now < last_tick (now=0x%lx,last_tick=0x%lx)!\n", smp_processor_id(), now, last_tick); - return last_time_offset; + return last_nsec_offset; } elapsed_cycles = now - last_tick; - return (elapsed_cycles*local_cpu_data->usec_per_cyc) >> IA64_USEC_PER_CYC_SHIFT; + return (elapsed_cycles*local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT; } void @@ -115,30 +115,55 @@ void do_gettimeofday (struct timeval *tv) { - unsigned long seq, usec, sec, old; + unsigned long seq, nsec, usec, sec, old, offset; - do { + while (1) { seq = read_seqbegin(&xtime_lock); - usec = gettimeoffset(); - + { + old = last_nsec_offset; + offset = gettimeoffset(); + sec = xtime.tv_sec; + nsec = xtime.tv_nsec; + } + if (unlikely(read_seqretry(&xtime_lock, seq))) + continue; /* - * Ensure time never goes backwards, even when ITC on - * different CPUs are not perfectly synchronized. + * Ensure that for any pair of causally ordered gettimeofday() calls, time + * never goes backwards (even when ITC on different CPUs are not perfectly + * synchronized). (A pair of concurrent calls to gettimeofday() is by + * definition non-causal and hence it makes no sense to talk about + * time-continuity for such calls.) + * + * Doing this in a lock-free and race-free manner is tricky. Here is why + * it works (most of the time): read_seqretry() just succeeded, which + * implies we calculated a consistent (valid) value for "offset". If the + * cmpxchg() below succeeds, we further know that last_nsec_offset still + * has the same value as at the beginning of the loop, so there was + * presumably no timer-tick or other updates to last_nsec_offset in the + * meantime. This isn't 100% true though: there _is_ a possibility of a + * timer-tick occurring right after read_seqretry() and then getting + * zero or more other readers which will set last_nsec_offset to the same + * value as the one we read at the beginning of the loop. If this + * happens, we'll end up returning a slightly newer time than we ought to + * (the jump forward is at most "offset" nano-seconds). There is no + * danger of causing time to go backwards, though, so we are safe in that + * sense. We could make the probability of this unlucky case occurring + * arbitrarily small by encoding a version number in last_nsec_offset, but + * even without versioning, the probability of this unlucky case should be + * so small that we won't worry about it.
*/ - do { - old = last_time_offset; - if (usec <= old) { - usec = old; - break; - } - } while (cmpxchg(&last_time_offset, old, usec) != old); - - sec = xtime.tv_sec; - usec += xtime.tv_nsec / 1000; - } while (read_seqend(&xtime_lock, seq)); + if (offset <= old) { + offset = old; + break; + } else if (likely(cmpxchg(&last_nsec_offset, old, offset) == old)) + break; + + /* someone else beat us to updating last_nsec_offset; try again */ + } + usec = (nsec + offset) / 1000; - while (usec >= 1000000) { + while (unlikely(usec >= 1000000)) { usec -= 1000000; ++sec; } @@ -278,7 +303,7 @@ local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den; local_cpu_data->itc_freq = itc_freq; local_cpu_data->cyc_per_usec = (itc_freq + 500000) / 1000000; - local_cpu_data->usec_per_cyc = ((1000000UL<nsec_per_cyc = ((1000000000UL<mmap_sem); vma = find_vma_prev(mm, address, &prev_vma); @@ -79,7 +91,7 @@ # if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \ || (1 << VM_EXEC_BIT) != VM_EXEC) -# error File is out of sync with . Pleaes update. +# error File is out of sync with . Please update. # endif mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT) @@ -139,6 +151,9 @@ bad_area: up_read(&mm->mmap_sem); +#ifdef CONFIG_VIRTUAL_MEM_MAP + bad_area_no_up: +#endif if ((isr & IA64_ISR_SP) || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) { diff -Nru a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c --- a/arch/ia64/mm/init.c Wed Feb 12 14:47:57 2003 +++ b/arch/ia64/mm/init.c Wed Feb 12 14:47:57 2003 @@ -38,6 +38,13 @@ unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; +#ifdef CONFIG_VIRTUAL_MEM_MAP +# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */ + unsigned long vmalloc_end = VMALLOC_END_INIT; + static struct page *vmem_map; + static unsigned long num_dma_physpages; +#endif + static int pgt_cache_water[2] = { 25, 50 }; void @@ -338,6 +345,144 @@ ia64_tlb_init(); } +#ifdef CONFIG_VIRTUAL_MEM_MAP + +static int +create_mem_map_page_table (u64 start, u64 end, void *arg) +{ + unsigned long address, start_page, end_page; + struct page *map_start, *map_end; + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + + map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); + map_end = vmem_map + (__pa(end) >> PAGE_SHIFT); + + start_page = (unsigned long) map_start & PAGE_MASK; + end_page = PAGE_ALIGN((unsigned long) map_end); + + for (address = start_page; address < end_page; address += PAGE_SIZE) { + pgd = pgd_offset_k(address); + if (pgd_none(*pgd)) + pgd_populate(&init_mm, pgd, alloc_bootmem_pages(PAGE_SIZE)); + pmd = pmd_offset(pgd, address); + + if (pmd_none(*pmd)) + pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages(PAGE_SIZE)); + pte = pte_offset_kernel(pmd, address); + + if (pte_none(*pte)) + set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages(PAGE_SIZE)) >> PAGE_SHIFT, + PAGE_KERNEL)); + } + return 0; +} + +struct memmap_init_callback_data { + memmap_init_callback_t *memmap_init; + struct page *start; + struct page *end; + int nid; + unsigned long zone; +}; + +static int +virtual_memmap_init (u64 start, u64 end, void *arg) +{ + struct memmap_init_callback_data *args; + struct page *map_start, *map_end; + + args = (struct memmap_init_callback_data *) arg; + + map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); + map_end = vmem_map + (__pa(end) >> PAGE_SHIFT); + + if (map_start < args->start) + map_start = args->start; + if (map_end > args->end) + map_end = args->end; + + /* + * We have to initialize "out of bounds" struct page 
elements + * that fit completely on the same pages that were allocated + * for the "in bounds" elements because they may be referenced + * later (and found to be "reserved"). + */ + + map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page); + map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end) + / sizeof(struct page)); + + if (map_start < map_end) + (*args->memmap_init)(map_start, (unsigned long)(map_end - map_start), + args->nid,args->zone,page_to_pfn(map_start)); + return 0; +} + +void +arch_memmap_init (memmap_init_callback_t *memmap_init, + struct page *start, unsigned long size, int nid, + unsigned long zone, unsigned long start_pfn) +{ + if (!vmem_map) + memmap_init(start,size,nid,zone,start_pfn); + else { + struct memmap_init_callback_data args; + + args.memmap_init = memmap_init; + args.start = start; + args.end = start + size; + args.nid = nid; + args.zone = zone; + + efi_memmap_walk(virtual_memmap_init, &args); + } +} + +int +ia64_pfn_valid (unsigned long pfn) +{ + char byte; + + return __get_user(byte, (char *) pfn_to_page(pfn)) == 0; +} + +static int +count_dma_pages (u64 start, u64 end, void *arg) +{ + unsigned long *count = arg; + + if (end <= MAX_DMA_ADDRESS) + *count += (end - start) >> PAGE_SHIFT; + return 0; +} + +static int +find_largest_hole (u64 start, u64 end, void *arg) +{ + u64 *max_gap = arg; + + static u64 last_end = PAGE_OFFSET; + + /* NOTE: this algorithm assumes efi memmap table is ordered */ + + if (*max_gap < (start - last_end)) + *max_gap = start - last_end; + last_end = end; + return 0; +} +#endif /* CONFIG_VIRTUAL_MEM_MAP */ + +static int +count_pages (u64 start, u64 end, void *arg) +{ + unsigned long *count = arg; + + *count += (end - start) >> PAGE_SHIFT; + return 0; +} + /* * Set up the page tables. 
*/ @@ -349,18 +494,71 @@ extern void discontig_paging_init(void); discontig_paging_init(); + efi_memmap_walk(count_pages, &num_physpages); } #else /* !CONFIG_DISCONTIGMEM */ void paging_init (void) { - unsigned long max_dma, zones_size[MAX_NR_ZONES]; + unsigned long max_dma; + unsigned long zones_size[MAX_NR_ZONES]; +# ifdef CONFIG_VIRTUAL_MEM_MAP + unsigned long zholes_size[MAX_NR_ZONES]; + unsigned long max_gap; +# endif /* initialize mem_map[] */ memset(zones_size, 0, sizeof(zones_size)); + num_physpages = 0; + efi_memmap_walk(count_pages, &num_physpages); + max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; + +# ifdef CONFIG_VIRTUAL_MEM_MAP + memset(zholes_size, 0, sizeof(zholes_size)); + + num_dma_physpages = 0; + efi_memmap_walk(count_dma_pages, &num_dma_physpages); + + if (max_low_pfn < max_dma) { + zones_size[ZONE_DMA] = max_low_pfn; + zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages; + } + else { + zones_size[ZONE_DMA] = max_dma; + zholes_size[ZONE_DMA] = max_dma - num_dma_physpages; + if (num_physpages > num_dma_physpages) { + zones_size[ZONE_NORMAL] = max_low_pfn - max_dma; + zholes_size[ZONE_NORMAL] = ((max_low_pfn - max_dma) + - (num_physpages - num_dma_physpages)); + } + } + + max_gap = 0; + efi_memmap_walk(find_largest_hole, (u64 *)&max_gap); + if (max_gap < LARGE_GAP) { + vmem_map = (struct page *) 0; + free_area_init_node(0, &contig_page_data, NULL, zones_size, 0, zholes_size); + mem_map = contig_page_data.node_mem_map; + } + else { + unsigned long map_size; + + /* allocate virtual_mem_map */ + + map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page)); + vmalloc_end -= map_size; + vmem_map = (struct page *) vmalloc_end; + efi_memmap_walk(create_mem_map_page_table, 0); + + free_area_init_node(0, &contig_page_data, vmem_map, zones_size, 0, zholes_size); + + mem_map = contig_page_data.node_mem_map; + printk("Virtual mem_map starts at 0x%p\n", mem_map); + } +# else /* !CONFIG_VIRTUAL_MEM_MAP */ if (max_low_pfn < max_dma) zones_size[ZONE_DMA] = max_low_pfn; else { @@ -368,19 +566,11 @@ zones_size[ZONE_NORMAL] = max_low_pfn - max_dma; } free_area_init(zones_size); +# endif /* !CONFIG_VIRTUAL_MEM_MAP */ } #endif /* !CONFIG_DISCONTIGMEM */ static int -count_pages (u64 start, u64 end, void *arg) -{ - unsigned long *count = arg; - - *count += (end - start) >> PAGE_SHIFT; - return 0; -} - -static int count_reserved_pages (u64 start, u64 end, void *arg) { unsigned long num_reserved = 0; @@ -415,9 +605,6 @@ BUG(); max_mapnr = max_low_pfn; #endif - - num_physpages = 0; - efi_memmap_walk(count_pages, &num_physpages); high_memory = __va(max_low_pfn * PAGE_SIZE); diff -Nru a/arch/ia64/tools/print_offsets.c b/arch/ia64/tools/print_offsets.c --- a/arch/ia64/tools/print_offsets.c Wed Feb 12 14:47:58 2003 +++ b/arch/ia64/tools/print_offsets.c Wed Feb 12 14:47:58 2003 @@ -10,7 +10,7 @@ * gets translated into an assembly file which, in turn, is processed * by awk to generate offsets.h. So if you make any changes to this * file, be sure to verify that the awk procedure still works (see - * prin_offsets.awk). + * print_offsets.awk). 
*/ #include diff -Nru a/drivers/acpi/osl.c b/drivers/acpi/osl.c --- a/drivers/acpi/osl.c Wed Feb 12 14:47:58 2003 +++ b/drivers/acpi/osl.c Wed Feb 12 14:47:58 2003 @@ -143,9 +143,9 @@ #ifdef CONFIG_ACPI_EFI addr->pointer_type = ACPI_PHYSICAL_POINTER; if (efi.acpi20) - addr->pointer.physical = (ACPI_PHYSICAL_ADDRESS) virt_to_phys(efi.acpi20); + addr->pointer.physical = (acpi_physical_address) virt_to_phys(efi.acpi20); else if (efi.acpi) - addr->pointer.physical = (ACPI_PHYSICAL_ADDRESS) virt_to_phys(efi.acpi); + addr->pointer.physical = (acpi_physical_address) virt_to_phys(efi.acpi); else { printk(KERN_ERR PREFIX "System description tables not found\n"); return AE_NOT_FOUND; @@ -224,7 +224,14 @@ acpi_os_install_interrupt_handler(u32 irq, OSD_HANDLER handler, void *context) { #ifdef CONFIG_IA64 - irq = gsi_to_vector(irq); + int vector; + + vector = acpi_irq_to_vector(irq); + if (vector < 0) { + printk(KERN_ERR PREFIX "SCI (IRQ%d) not registerd\n", irq); + return AE_OK; + } + irq = vector; #endif acpi_irq_irq = irq; acpi_irq_handler = handler; @@ -242,7 +249,7 @@ { if (acpi_irq_handler) { #ifdef CONFIG_IA64 - irq = gsi_to_vector(irq); + irq = acpi_irq_to_vector(irq); #endif free_irq(irq, acpi_irq); acpi_irq_handler = NULL; diff -Nru a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c --- a/drivers/acpi/pci_irq.c Wed Feb 12 14:47:57 2003 +++ b/drivers/acpi/pci_irq.c Wed Feb 12 14:47:57 2003 @@ -36,6 +36,9 @@ #ifdef CONFIG_X86_IO_APIC #include #endif +#ifdef CONFIG_IOSAPIC +# include +#endif #include "acpi_bus.h" #include "acpi_drivers.h" @@ -250,6 +253,8 @@ return_VALUE(0); } + entry->irq = entry->link.index; + if (!entry->irq && entry->link.handle) { entry->irq = acpi_pci_link_get_irq(entry->link.handle, entry->link.index); if (!entry->irq) { @@ -355,7 +360,7 @@ return_VALUE(0); } - dev->irq = irq; + dev->irq = gsi_to_irq(irq); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device %s using IRQ %d\n", dev->slot_name, dev->irq)); diff -Nru a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig --- a/drivers/char/agp/Kconfig Wed Feb 12 14:47:58 2003 +++ b/drivers/char/agp/Kconfig Wed Feb 12 14:47:58 2003 @@ -61,16 +61,6 @@ You should say Y here if you use XFree86 3.3.6 or 4.x and want to use GLX or DRI. If unsure, say N. -config AGP_VIA_KT400 - tristate "VIA KT400 chipset support" - depends on AGP3 - help - This option gives you AGP support for the GLX component of the - XFree86 4.x on VIA KT400 AGP 3.0 chipsets. - - You should say Y here if you use XFree86 3.3.6 or 4.x and want to - use GLX or DRI. If unsure, say N. - config AGP_AMD tristate "AMD Irongate, 761, and 762 support" depends on AGP @@ -140,6 +130,11 @@ help This option gives you AGP GART support for the HP ZX1 chipset for IA64 processors. + +config AGP_ALPHA_CORE + tristate + depends on AGP && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL) + default AGP # Put AGP 3.0 entries below here. 
diff -Nru a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile --- a/drivers/char/agp/Makefile Wed Feb 12 14:47:57 2003 +++ b/drivers/char/agp/Makefile Wed Feb 12 14:47:57 2003 @@ -10,7 +10,6 @@ obj-$(CONFIG_AGP_INTEL) += intel-agp.o obj-$(CONFIG_AGP_VIA) += via-agp.o -obj-$(CONFIG_AGP_VIA_KT400) += via-kt400.o obj-$(CONFIG_AGP_AMD) += amd-k7-agp.o obj-$(CONFIG_AGP_SIS) += sis-agp.o obj-$(CONFIG_AGP_ALI) += ali-agp.o @@ -18,6 +17,7 @@ obj-$(CONFIG_AGP_I460) += i460-agp.o obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o obj-$(CONFIG_AGP_AMD_8151) += amd-k8-agp.o +obj-$(CONFIG_AGP_ALPHA_CORE) += alpha-agp.o obj-$(CONFIG_AGP_I7x05) += i7x05-agp.o diff -Nru a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h --- a/drivers/char/agp/agp.h Wed Feb 12 14:47:57 2003 +++ b/drivers/char/agp/agp.h Wed Feb 12 14:47:57 2003 @@ -30,7 +30,7 @@ #include /* for flush_agp_cache() */ -extern struct agp_bridge_data agp_bridge; +extern struct agp_bridge_data *agp_bridge; #define PFX "agpgart: " @@ -128,6 +128,7 @@ int num_aperture_sizes; int capndx; int cant_use_aperture; + struct vm_operations_struct *vm_ops; /* Links to driver specific functions */ @@ -165,20 +166,20 @@ #define MB(x) (KB (KB (x))) #define GB(x) (MB (KB (x))) -#define CACHE_FLUSH agp_bridge.cache_flush +#define CACHE_FLUSH agp_bridge->cache_flush #define A_SIZE_8(x) ((struct aper_size_info_8 *) x) #define A_SIZE_16(x) ((struct aper_size_info_16 *) x) #define A_SIZE_32(x) ((struct aper_size_info_32 *) x) #define A_SIZE_LVL2(x) ((struct aper_size_info_lvl2 *) x) #define A_SIZE_FIX(x) ((struct aper_size_info_fixed *) x) -#define A_IDX8() (A_SIZE_8(agp_bridge.aperture_sizes) + i) -#define A_IDX16() (A_SIZE_16(agp_bridge.aperture_sizes) + i) -#define A_IDX32() (A_SIZE_32(agp_bridge.aperture_sizes) + i) -#define A_IDXLVL2() (A_SIZE_LVL2(agp_bridge.aperture_sizes) + i) -#define A_IDXFIX() (A_SIZE_FIX(agp_bridge.aperture_sizes) + i) +#define A_IDX8() (A_SIZE_8(agp_bridge->aperture_sizes) + i) +#define A_IDX16() (A_SIZE_16(agp_bridge->aperture_sizes) + i) +#define A_IDX32() (A_SIZE_32(agp_bridge->aperture_sizes) + i) +#define A_IDXLVL2() (A_SIZE_LVL2(agp_bridge->aperture_sizes) + i) +#define A_IDXFIX() (A_SIZE_FIX(agp_bridge->aperture_sizes) + i) #define MAXKEY (4096 * 32) -#define PGE_EMPTY(p) (!(p) || (p) == (unsigned long) agp_bridge.scratch_page) +#define PGE_EMPTY(p) (!(p) || (p) == (unsigned long) agp_bridge->scratch_page) /* intel register */ #define INTEL_APBASE 0x10 @@ -366,7 +367,7 @@ /* Generic routines. 
*/ void agp_generic_agp_enable(u32 mode); -int agp_generic_agp_3_0_enable(u32 mode); +void agp_generic_agp_3_0_enable(u32 mode); int agp_generic_create_gatt_table(void); int agp_generic_free_gatt_table(void); agp_memory *agp_create_memory(int scratch_pages); diff -Nru a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c --- a/drivers/char/agp/ali-agp.c Wed Feb 12 14:47:58 2003 +++ b/drivers/char/agp/ali-agp.c Wed Feb 12 14:47:58 2003 @@ -17,15 +17,15 @@ u32 temp; struct aper_size_info_32 *values; - pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp); + pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp); temp &= ~(0xfffffff0); - values = A_SIZE_32(agp_bridge.aperture_sizes); + values = A_SIZE_32(agp_bridge->aperture_sizes); - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + for (i = 0; i < agp_bridge->num_aperture_sizes; i++) { if (temp == values[i].size_value) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); - agp_bridge.aperture_size_idx = i; + agp_bridge->previous_size = + agp_bridge->current_size = (void *) (values + i); + agp_bridge->aperture_size_idx = i; return values[i].size; } } @@ -37,9 +37,9 @@ { u32 temp; - pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); + pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp); // clear tag - pci_write_config_dword(agp_bridge.dev, ALI_TAGCTRL, + pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL, ((temp & 0xfffffff0) | 0x00000001|0x00000002)); } @@ -48,15 +48,15 @@ struct aper_size_info_32 *previous_size; u32 temp; - previous_size = A_SIZE_32(agp_bridge.previous_size); + previous_size = A_SIZE_32(agp_bridge->previous_size); - pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); + pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp); // clear tag - pci_write_config_dword(agp_bridge.dev, ALI_TAGCTRL, + pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL, ((temp & 0xffffff00) | 0x00000001|0x00000002)); - pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp); - pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, + pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp); + pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE, ((temp & 0x00000ff0) | previous_size->size_value)); } @@ -65,24 +65,24 @@ u32 temp; struct aper_size_info_32 *current_size; - current_size = A_SIZE_32(agp_bridge.current_size); + current_size = A_SIZE_32(agp_bridge->current_size); /* aperture size and gatt addr */ - pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp); - temp = (((temp & 0x00000ff0) | (agp_bridge.gatt_bus_addr & 0xfffff000)) + pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp); + temp = (((temp & 0x00000ff0) | (agp_bridge->gatt_bus_addr & 0xfffff000)) | (current_size->size_value & 0xf)); - pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, temp); + pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE, temp); /* tlb control */ - pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); - pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010)); + pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp); + pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010)); /* address to map to */ - pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + pci_read_config_dword(agp_bridge->dev, ALI_APBASE, &temp); + agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); #if 0 - if (agp_bridge.type == ALI_M1541) { + if 
(agp_bridge->type == ALI_M1541) { u32 nlvm_addr = 0; switch (current_size->size_value) { @@ -101,15 +101,15 @@ nlvm_addr--; nlvm_addr&=0xfff00000; - nlvm_addr+= agp_bridge.gart_bus_addr; - nlvm_addr|=(agp_bridge.gart_bus_addr>>12); + nlvm_addr+= agp_bridge->gart_bus_addr; + nlvm_addr|=(agp_bridge->gart_bus_addr>>12); printk(KERN_INFO PFX "nlvm top &base = %8x\n",nlvm_addr); } #endif - pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); + pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp); temp &= 0xffffff7f; //enable TLB - pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, temp); + pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, temp); return 0; } @@ -118,23 +118,23 @@ { /* Memory type is ignored */ - return addr | agp_bridge.masks[0].mask; + return addr | agp_bridge->masks[0].mask; } static void ali_cache_flush(void) { global_cache_flush(); - if (agp_bridge.type == ALI_M1541) { + if (agp_bridge->type == ALI_M1541) { int i, page_count; u32 temp; - page_count = 1 << A_SIZE_32(agp_bridge.current_size)->page_order; + page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order; for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) { - pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp); - pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, + pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); + pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | - (agp_bridge.gatt_bus_addr + i)) | + (agp_bridge->gatt_bus_addr + i)) | ALI_CACHE_FLUSH_EN)); } } @@ -148,9 +148,9 @@ if (adr == 0) return 0; - if (agp_bridge.type == ALI_M1541) { - pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp); - pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, + if (agp_bridge->type == ALI_M1541) { + pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); + pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | virt_to_phys(adr)) | ALI_CACHE_FLUSH_EN )); @@ -167,9 +167,9 @@ global_cache_flush(); - if (agp_bridge.type == ALI_M1541) { - pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp); - pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, + if (agp_bridge->type == ALI_M1541) { + pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); + pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN)); @@ -197,30 +197,30 @@ static int __init ali_generic_setup (struct pci_dev *pdev) { - agp_bridge.masks = ali_generic_masks; - agp_bridge.aperture_sizes = (void *) ali_generic_sizes; - agp_bridge.size_type = U32_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = ali_configure; - agp_bridge.fetch_size = ali_fetch_size; - agp_bridge.cleanup = ali_cleanup; - agp_bridge.tlb_flush = ali_tlbflush; - agp_bridge.mask_memory = ali_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = ali_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = ali_alloc_page; - 
agp_bridge.agp_destroy_page = ali_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; + agp_bridge->masks = ali_generic_masks; + agp_bridge->aperture_sizes = (void *) ali_generic_sizes; + agp_bridge->size_type = U32_APER_SIZE; + agp_bridge->num_aperture_sizes = 7; + agp_bridge->dev_private_data = NULL; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->configure = ali_configure; + agp_bridge->fetch_size = ali_fetch_size; + agp_bridge->cleanup = ali_cleanup; + agp_bridge->tlb_flush = ali_tlbflush; + agp_bridge->mask_memory = ali_mask_memory; + agp_bridge->agp_enable = agp_generic_agp_enable; + agp_bridge->cache_flush = ali_cache_flush; + agp_bridge->create_gatt_table = agp_generic_create_gatt_table; + agp_bridge->free_gatt_table = agp_generic_free_gatt_table; + agp_bridge->insert_memory = agp_generic_insert_memory; + agp_bridge->remove_memory = agp_generic_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = ali_alloc_page; + agp_bridge->agp_destroy_page = ali_destroy_page; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = agp_generic_resume; + agp_bridge->cant_use_aperture = 0; return 0; } @@ -313,7 +313,7 @@ printk (KERN_INFO PFX "Detected ALi %s chipset\n", devs[j].chipset_name); - agp_bridge.type = devs[j].chipset; + agp_bridge->type = devs[j].chipset; if (devs[j].chipset_setup != NULL) return devs[j].chipset_setup(pdev); @@ -327,7 +327,7 @@ if (agp_try_unsupported) { printk(KERN_WARNING PFX "Trying generic ALi routines" " for device id: %04x\n", pdev->device); - agp_bridge.type = ALI_GENERIC; + agp_bridge->type = ALI_GENERIC; return ali_generic_setup(pdev); } @@ -350,10 +350,10 @@ /* probe for known chipsets */ if (agp_lookup_host_bridge(dev) != -ENODEV) { - agp_bridge.dev = dev; - agp_bridge.capndx = cap_ptr; + agp_bridge->dev = dev; + agp_bridge->capndx = cap_ptr; /* Fill in the mode register */ - pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx+PCI_AGP_STATUS, &agp_bridge.mode); + pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode); ali_agp_driver.dev = dev; agp_register_driver(&ali_agp_driver); return 0; @@ -387,7 +387,7 @@ ret_val = pci_module_init(&agp_ali_pci_driver); if (ret_val) - agp_bridge.type = NOT_SUPPORTED; + agp_bridge->type = NOT_SUPPORTED; return ret_val; } diff -Nru a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/char/agp/alpha-agp.c Wed Feb 12 14:47:58 2003 @@ -0,0 +1,217 @@ +#include +#include +#include +#include +#include +#include + +#include +#include +#include "../../../arch/alpha/kernel/pci_impl.h" + +#include "agp.h" + +static struct page *alpha_core_agp_vm_nopage(struct vm_area_struct *vma, + unsigned long address, + int write_access) +{ + alpha_agp_info *agp = agp_bridge->dev_private_data; + dma_addr_t dma_addr; + unsigned long pa; + struct page *page; + + dma_addr = address - vma->vm_start + agp->aperture.bus_base; + pa = agp->ops->translate(agp, dma_addr); + + if (pa == (unsigned long)-EINVAL) return NULL; /* no translation */ + + /* + * Get the page, inc the use count, and return it + */ + page = virt_to_page(__va(pa)); + get_page(page); + return page; +} + +static struct aper_size_info_fixed alpha_core_agp_sizes[] = +{ + { 0, 0, 0 }, /* filled in by alpha_core_agp_setup */ +}; + +static struct gatt_mask alpha_core_agp_masks[] = { + 
{ .mask = 0, .type = 0 }, +}; + +struct vm_operations_struct alpha_core_agp_vm_ops = { + .nopage = alpha_core_agp_vm_nopage, +}; + + +static int alpha_core_agp_nop(void) +{ + /* just return success */ + return 0; +} + +static int alpha_core_agp_fetch_size(void) +{ + return alpha_core_agp_sizes[0].size; +} + +static int alpha_core_agp_configure(void) +{ + alpha_agp_info *agp = agp_bridge->dev_private_data; + agp_bridge->gart_bus_addr = agp->aperture.bus_base; + return 0; +} + +static void alpha_core_agp_cleanup(void) +{ + alpha_agp_info *agp = agp_bridge->dev_private_data; + + agp->ops->cleanup(agp); +} + +static void alpha_core_agp_tlbflush(agp_memory *mem) +{ + alpha_agp_info *agp = agp_bridge->dev_private_data; + alpha_mv.mv_pci_tbi(agp->hose, 0, -1); +} + +static unsigned long alpha_core_agp_mask_memory(unsigned long addr, int type) +{ + /* Memory type is ignored */ + return addr | agp_bridge->masks[0].mask; +} + +static void alpha_core_agp_enable(u32 mode) +{ + alpha_agp_info *agp = agp_bridge->dev_private_data; + + agp->mode.lw = agp_collect_device_status(mode, agp->capability.lw); + + agp->mode.bits.enable = 1; + agp->ops->configure(agp); + + agp_device_command(agp->mode.lw, 0); +} + +static int alpha_core_agp_insert_memory(agp_memory *mem, off_t pg_start, + int type) +{ + alpha_agp_info *agp = agp_bridge->dev_private_data; + int num_entries, status; + void *temp; + + temp = agp_bridge->current_size; + num_entries = A_SIZE_FIX(temp)->num_entries; + if ((pg_start + mem->page_count) > num_entries) return -EINVAL; + + status = agp->ops->bind(agp, pg_start, mem); + mb(); + agp_bridge->tlb_flush(mem); + + return status; +} + +static int alpha_core_agp_remove_memory(agp_memory *mem, off_t pg_start, + int type) +{ + alpha_agp_info *agp = agp_bridge->dev_private_data; + int status; + + status = agp->ops->unbind(agp, pg_start, mem); + agp_bridge->tlb_flush(mem); + return status; +} + + +static struct agp_driver alpha_core_agp_driver = { + .owner = THIS_MODULE, +}; + +int __init +alpha_core_agp_setup(void) +{ + alpha_agp_info *agp = alpha_mv.agp_info(); + struct aper_size_info_fixed *aper_size; + + if (!agp) return -ENODEV; + if (agp->ops->setup(agp)) return -ENODEV; + + /* + * Build the aperture size descriptor + */ + aper_size = alpha_core_agp_sizes; + if (!aper_size) return -ENOMEM; + aper_size->size = agp->aperture.size / (1024 * 1024); + aper_size->num_entries = agp->aperture.size / PAGE_SIZE; + aper_size->page_order = ffs(aper_size->num_entries / 1024) - 1; + + /* + * Build a fake pci_dev struct + */ + if (!(agp_bridge->dev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL))) { + return -ENOMEM; + } + agp_bridge->dev->vendor = 0xffff; + agp_bridge->dev->device = 0xffff; + agp_bridge->dev->sysdata = agp->hose; + + /* + * Fill in the rest of the agp_bridge struct + */ + agp_bridge->masks = alpha_core_agp_masks; + agp_bridge->aperture_sizes = aper_size; + agp_bridge->current_size = aper_size; /* only one entry */ + agp_bridge->size_type = FIXED_APER_SIZE; + agp_bridge->num_aperture_sizes = 1; + agp_bridge->dev_private_data = agp; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->configure = alpha_core_agp_configure; + agp_bridge->fetch_size = alpha_core_agp_fetch_size; + agp_bridge->cleanup = alpha_core_agp_cleanup; + agp_bridge->tlb_flush = alpha_core_agp_tlbflush; + agp_bridge->mask_memory = alpha_core_agp_mask_memory; + agp_bridge->agp_enable = alpha_core_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = alpha_core_agp_nop; + 
agp_bridge->free_gatt_table = alpha_core_agp_nop; + agp_bridge->insert_memory = alpha_core_agp_insert_memory; + agp_bridge->remove_memory = alpha_core_agp_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->mode = agp->capability.lw; + agp_bridge->cant_use_aperture = 1; + agp_bridge->vm_ops = &alpha_core_agp_vm_ops; + + alpha_core_agp_driver.dev = agp_bridge->dev; + agp_register_driver(&alpha_core_agp_driver); + printk(KERN_INFO "Detected AGP on hose %d\n", agp->hose->index); + return 0; +} + +static int __init agp_alpha_core_init(void) +{ + int ret_val = -ENODEV; + if (alpha_mv.agp_info) { + agp_bridge->type = ALPHA_CORE_AGP; + ret_val = alpha_core_agp_setup(); + } + + return ret_val; +} + +static void __exit agp_alpha_core_cleanup(void) +{ + agp_unregister_driver(&alpha_core_agp_driver); + /* no pci driver for core */ +} + +module_init(agp_alpha_core_init); +module_exit(agp_alpha_core_cleanup); + +MODULE_AUTHOR("Jeff Wiedemeier "); +MODULE_LICENSE("GPL and additional rights"); diff -Nru a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c --- a/drivers/char/agp/amd-k7-agp.c Wed Feb 12 14:47:58 2003 +++ b/drivers/char/agp/amd-k7-agp.c Wed Feb 12 14:47:58 2003 @@ -45,7 +45,7 @@ CACHE_FLUSH(); for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { - page_map->remapped[i] = agp_bridge.scratch_page; + page_map->remapped[i] = agp_bridge->scratch_page; } return 0; @@ -115,7 +115,7 @@ #define GET_PAGE_DIR_OFF(addr) (addr >> 22) #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ - GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr)) + GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr)) #define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) #define GET_GATT(addr) (amd_irongate_private.gatt_pages[\ GET_PAGE_DIR_IDX(addr)]->remapped) @@ -129,7 +129,7 @@ u32 temp; int i; - value = A_SIZE_LVL2(agp_bridge.current_size); + value = A_SIZE_LVL2(agp_bridge->current_size); retval = amd_create_page_map(&page_dir); if (retval != 0) { return retval; } @@ -141,18 +141,18 @@ return retval; } - agp_bridge.gatt_table_real = (u32 *)page_dir.real; - agp_bridge.gatt_table = (u32 *)page_dir.remapped; - agp_bridge.gatt_bus_addr = virt_to_phys(page_dir.real); + agp_bridge->gatt_table_real = (u32 *)page_dir.real; + agp_bridge->gatt_table = (u32 *)page_dir.remapped; + agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real); /* Get the address for the gart region.
* This is a bus address even on the alpha, b/c its * used to program the agp master not the cpu */ - pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp); + pci_read_config_dword(agp_bridge->dev, AMD_APBASE, &temp); addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - agp_bridge.gart_bus_addr = addr; + agp_bridge->gart_bus_addr = addr; /* Calculate the agp offset */ for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { @@ -168,8 +168,8 @@ { struct amd_page_map page_dir; - page_dir.real = (unsigned long *)agp_bridge.gatt_table_real; - page_dir.remapped = (unsigned long *)agp_bridge.gatt_table; + page_dir.real = (unsigned long *)agp_bridge->gatt_table_real; + page_dir.remapped = (unsigned long *)agp_bridge->gatt_table; amd_free_gatt_pages(); amd_free_page_map(&page_dir); @@ -182,15 +182,15 @@ u32 temp; struct aper_size_info_lvl2 *values; - pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); + pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp); temp = (temp & 0x0000000e); - values = A_SIZE_LVL2(agp_bridge.aperture_sizes); - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + values = A_SIZE_LVL2(agp_bridge->aperture_sizes); + for (i = 0; i < agp_bridge->num_aperture_sizes; i++) { if (temp == values[i].size_value) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); + agp_bridge->previous_size = + agp_bridge->current_size = (void *) (values + i); - agp_bridge.aperture_size_idx = i; + agp_bridge->aperture_size_idx = i; return values[i].size; } } @@ -204,22 +204,22 @@ u32 temp; u16 enable_reg; - current_size = A_SIZE_LVL2(agp_bridge.current_size); + current_size = A_SIZE_LVL2(agp_bridge->current_size); /* Get the memory mapped registers */ - pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp); + pci_read_config_dword(agp_bridge->dev, AMD_MMBASE, &temp); temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); amd_irongate_private.registers = (volatile u8 *) ioremap(temp, 4096); /* Write out the address of the gatt table */ OUTREG32(amd_irongate_private.registers, AMD_ATTBASE, - agp_bridge.gatt_bus_addr); + agp_bridge->gatt_bus_addr); /* Write the Sync register */ - pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80); + pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL, 0x80); /* Set indexing mode */ - pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL2, 0x00); + pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL2, 0x00); /* Write the enable register */ enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE); @@ -227,10 +227,10 @@ OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg); /* Write out the size register */ - pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); + pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp); temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 0x00000001); - pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp); + pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp); /* Flush the tlb */ OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001); @@ -244,16 +244,16 @@ u32 temp; u16 enable_reg; - previous_size = A_SIZE_LVL2(agp_bridge.previous_size); + previous_size = A_SIZE_LVL2(agp_bridge->previous_size); enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE); enable_reg = (enable_reg & ~(0x0004)); OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg); /* Write back the previous size and disable gart translation */ - pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); + pci_read_config_dword(agp_bridge->dev, 
AMD_APSIZE, &temp); temp = ((temp & ~(0x0000000f)) | previous_size->size_value); - pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp); + pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp); iounmap((void *) amd_irongate_private.registers); } @@ -274,7 +274,7 @@ { /* Only type 0 is supported by the irongate */ - return addr | agp_bridge.masks[0].mask; + return addr | agp_bridge->masks[0].mask; } static int amd_insert_memory(agp_memory * mem, @@ -284,7 +284,7 @@ unsigned long *cur_gatt; unsigned long addr; - num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries; + num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; if (type != 0 || mem->type != 0) { return -EINVAL; @@ -295,7 +295,7 @@ j = pg_start; while (j < (pg_start + mem->page_count)) { - addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; + addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) { return -EBUSY; @@ -309,12 +309,12 @@ } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; + addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); cur_gatt[GET_GATT_OFF(addr)] = - agp_bridge.mask_memory(mem->memory[i], mem->type); + agp_bridge->mask_memory(mem->memory[i], mem->type); } - agp_bridge.tlb_flush(mem); + agp_bridge->tlb_flush(mem); return 0; } @@ -329,13 +329,13 @@ return -EINVAL; } for (i = pg_start; i < (mem->page_count + pg_start); i++) { - addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr; + addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = GET_GATT(addr); cur_gatt[GET_GATT_OFF(addr)] = - (unsigned long) agp_bridge.scratch_page; + (unsigned long) agp_bridge->scratch_page; } - agp_bridge.tlb_flush(mem); + agp_bridge->tlb_flush(mem); return 0; } @@ -357,30 +357,30 @@ static int __init amd_irongate_setup (struct pci_dev *pdev) { - agp_bridge.masks = amd_irongate_masks; - agp_bridge.aperture_sizes = (void *) amd_irongate_sizes; - agp_bridge.size_type = LVL2_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = (void *) &amd_irongate_private; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = amd_irongate_configure; - agp_bridge.fetch_size = amd_irongate_fetch_size; - agp_bridge.cleanup = amd_irongate_cleanup; - agp_bridge.tlb_flush = amd_irongate_tlbflush; - agp_bridge.mask_memory = amd_irongate_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = amd_create_gatt_table; - agp_bridge.free_gatt_table = amd_free_gatt_table; - agp_bridge.insert_memory = amd_insert_memory; - agp_bridge.remove_memory = amd_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; + agp_bridge->masks = amd_irongate_masks; + agp_bridge->aperture_sizes = (void *) amd_irongate_sizes; + agp_bridge->size_type = LVL2_APER_SIZE; + agp_bridge->num_aperture_sizes = 7; + agp_bridge->dev_private_data = (void *) &amd_irongate_private; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->configure = amd_irongate_configure; + agp_bridge->fetch_size = amd_irongate_fetch_size; + agp_bridge->cleanup = amd_irongate_cleanup; + agp_bridge->tlb_flush 
= amd_irongate_tlbflush; + agp_bridge->mask_memory = amd_irongate_mask_memory; + agp_bridge->agp_enable = agp_generic_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = amd_create_gatt_table; + agp_bridge->free_gatt_table = amd_free_gatt_table; + agp_bridge->insert_memory = amd_insert_memory; + agp_bridge->remove_memory = amd_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = agp_generic_resume; + agp_bridge->cant_use_aperture = 0; return 0; } @@ -416,7 +416,7 @@ while (devs[j].chipset_name != NULL) { if (pdev->device == devs[j].device_id) { printk (KERN_INFO PFX "Detected AMD %s chipset\n", devs[j].chipset_name); - agp_bridge.type = devs[j].chipset; + agp_bridge->type = devs[j].chipset; if (devs[j].chipset_setup != NULL) return devs[j].chipset_setup(pdev); @@ -430,7 +430,7 @@ if (agp_try_unsupported) { printk(KERN_WARNING PFX "Trying generic AMD routines" " for device id: %04x\n", pdev->device); - agp_bridge.type = AMD_GENERIC; + agp_bridge->type = AMD_GENERIC; return amd_irongate_setup(pdev); } @@ -455,10 +455,10 @@ return -ENODEV; if (agp_lookup_host_bridge(dev) != -ENODEV) { - agp_bridge.dev = dev; - agp_bridge.capndx = cap_ptr; + agp_bridge->dev = dev; + agp_bridge->capndx = cap_ptr; /* Fill in the mode register */ - pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx+PCI_AGP_STATUS, &agp_bridge.mode); + pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode); amd_k7_agp_driver.dev = dev; agp_register_driver(&amd_k7_agp_driver); return 0; @@ -492,7 +492,7 @@ ret_val = pci_module_init(&agp_amdk7_pci_driver); if (ret_val) - agp_bridge.type = NOT_SUPPORTED; + agp_bridge->type = NOT_SUPPORTED; return ret_val; } diff -Nru a/drivers/char/agp/amd-k8-agp.c b/drivers/char/agp/amd-k8-agp.c --- a/drivers/char/agp/amd-k8-agp.c Wed Feb 12 14:47:58 2003 +++ b/drivers/char/agp/amd-k8-agp.c Wed Feb 12 14:47:58 2003 @@ -70,7 +70,7 @@ /* gatt table should be empty. 
*/ while (j < (pg_start + mem->page_count)) { - if (!PGE_EMPTY(agp_bridge.gatt_table[j])) + if (!PGE_EMPTY(agp_bridge->gatt_table[j])) return -EBUSY; j++; } @@ -81,7 +81,7 @@ } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - addr = agp_bridge.mask_memory(mem->memory[i], mem->type); + addr = agp_bridge->mask_memory(mem->memory[i], mem->type); tmp = addr; BUG_ON(tmp & 0xffffff0000000ffc); @@ -89,9 +89,9 @@ pte |=(tmp & 0x00000000fffff000); pte |= 1<<1|1<<0; - agp_bridge.gatt_table[j] = pte; + agp_bridge->gatt_table[j] = pte; } - agp_bridge.tlb_flush(mem); + agp_bridge->tlb_flush(mem); return 0; } @@ -134,12 +134,12 @@ temp = (temp & 0xe); values = A_SIZE_32(x86_64_aperture_sizes); - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + for (i = 0; i < agp_bridge->num_aperture_sizes; i++) { if (temp == values[i].size_value) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); + agp_bridge->previous_size = + agp_bridge->current_size = (void *) (values + i); - agp_bridge.aperture_size_idx = i; + agp_bridge->aperture_size_idx = i; return values[i].size; } } @@ -225,14 +225,14 @@ int current_size; int tmp, tmp2, i; u64 aperbar; - unsigned long gatt_bus = virt_to_phys(agp_bridge.gatt_table_real); + unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real); /* Configure AGP regs in each x86-64 host bridge. */ pci_for_each_dev(dev) { if (dev->bus->number==0 && PCI_FUNC(dev->devfn)==3 && PCI_SLOT(dev->devfn)>=24 && PCI_SLOT(dev->devfn)<=31) { - agp_bridge.gart_bus_addr = amd_x86_64_configure(dev,gatt_bus); + agp_bridge->gart_bus_addr = amd_x86_64_configure(dev,gatt_bus); hammer = dev; /* @@ -248,7 +248,7 @@ /* Shadow x86-64 registers into 8151 registers. */ - dev = agp_bridge.dev; + dev = agp_bridge->dev; if (!dev) return -ENODEV; @@ -315,7 +315,7 @@ static unsigned long amd_8151_mask_memory(unsigned long addr, int type) { - return addr | agp_bridge.masks[0].mask; + return addr | agp_bridge->masks[0].mask; } @@ -368,12 +368,12 @@ } - pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx+PCI_AGP_STATUS, &command); + pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+PCI_AGP_STATUS, &command); command = agp_collect_device_status(mode, command); command |= 0x100; - pci_write_config_dword(agp_bridge.dev, agp_bridge.capndx+PCI_AGP_COMMAND, command); + pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+PCI_AGP_COMMAND, command); agp_device_command(command, 1); } @@ -381,30 +381,30 @@ static int __init amd_8151_setup (struct pci_dev *pdev) { - agp_bridge.masks = amd_8151_masks; - agp_bridge.aperture_sizes = (void *) amd_8151_sizes; - agp_bridge.size_type = U32_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = amd_8151_configure; - agp_bridge.fetch_size = amd_x86_64_fetch_size; - agp_bridge.cleanup = amd_8151_cleanup; - agp_bridge.tlb_flush = amd_x86_64_tlbflush; - agp_bridge.mask_memory = amd_8151_mask_memory; - agp_bridge.agp_enable = agp_x86_64_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = x86_64_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = 
agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; + agp_bridge->masks = amd_8151_masks; + agp_bridge->aperture_sizes = (void *) amd_8151_sizes; + agp_bridge->size_type = U32_APER_SIZE; + agp_bridge->num_aperture_sizes = 7; + agp_bridge->dev_private_data = NULL; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->configure = amd_8151_configure; + agp_bridge->fetch_size = amd_x86_64_fetch_size; + agp_bridge->cleanup = amd_8151_cleanup; + agp_bridge->tlb_flush = amd_x86_64_tlbflush; + agp_bridge->mask_memory = amd_8151_mask_memory; + agp_bridge->agp_enable = agp_x86_64_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = agp_generic_create_gatt_table; + agp_bridge->free_gatt_table = agp_generic_free_gatt_table; + agp_bridge->insert_memory = x86_64_insert_memory; + agp_bridge->remove_memory = agp_generic_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = agp_generic_resume; + agp_bridge->cant_use_aperture = 0; return 0; } @@ -420,11 +420,11 @@ if (cap_ptr == 0) return -ENODEV; - agp_bridge.dev = dev; - agp_bridge.capndx = cap_ptr; + agp_bridge->dev = dev; + agp_bridge->capndx = cap_ptr; /* Fill in the mode register */ - pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx+PCI_AGP_STATUS, &agp_bridge.mode); + pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode); amd_8151_setup(dev); amd_k8_agp_driver.dev = dev; agp_register_driver(&amd_k8_agp_driver); @@ -458,9 +458,9 @@ ret_val = pci_module_init(&agp_amdk8_pci_driver); if (ret_val) - agp_bridge.type = NOT_SUPPORTED; + agp_bridge->type = NOT_SUPPORTED; - agp_bridge.type = AMD_8151; + agp_bridge->type = AMD_8151; return ret_val; } diff -Nru a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c --- a/drivers/char/agp/backend.c Wed Feb 12 14:47:58 2003 +++ b/drivers/char/agp/backend.c Wed Feb 12 14:47:58 2003 @@ -26,6 +26,7 @@ * TODO: * - Allocate more than order 0 pages to avoid too much linear map splitting. 
*/ + #include #include #include @@ -44,26 +45,27 @@ #define AGPGART_VERSION_MAJOR 0 #define AGPGART_VERSION_MINOR 100 -struct agp_bridge_data agp_bridge = { .type = NOT_SUPPORTED }; +struct agp_bridge_data agp_bridge_dummy = { .type = NOT_SUPPORTED }; +struct agp_bridge_data *agp_bridge = &agp_bridge_dummy; int agp_backend_acquire(void) { - if (agp_bridge.type == NOT_SUPPORTED) + if (agp_bridge->type == NOT_SUPPORTED) return -EINVAL; - if (atomic_read(&agp_bridge.agp_in_use) != 0) + if (atomic_read(&agp_bridge->agp_in_use) != 0) return -EBUSY; - atomic_inc(&agp_bridge.agp_in_use); + atomic_inc(&agp_bridge->agp_in_use); return 0; } void agp_backend_release(void) { - if (agp_bridge.type == NOT_SUPPORTED) + if (agp_bridge->type == NOT_SUPPORTED) return; - atomic_dec(&agp_bridge.agp_in_use); + atomic_dec(&agp_bridge->agp_in_use); } struct agp_max_table { @@ -114,38 +116,38 @@ { int size_value, rc, got_gatt=0, got_keylist=0; - agp_bridge.max_memory_agp = agp_find_max(); - agp_bridge.version = &agp_current_version; + agp_bridge->max_memory_agp = agp_find_max(); + agp_bridge->version = &agp_current_version; - if (agp_bridge.needs_scratch_page == TRUE) { + if (agp_bridge->needs_scratch_page == TRUE) { void *addr; - addr = agp_bridge.agp_alloc_page(); + addr = agp_bridge->agp_alloc_page(); if (addr == NULL) { printk(KERN_ERR PFX "unable to get memory for scratch page.\n"); return -ENOMEM; } - agp_bridge.scratch_page_real = virt_to_phys(addr); - agp_bridge.scratch_page = - agp_bridge.mask_memory(agp_bridge.scratch_page_real, 0); + agp_bridge->scratch_page_real = virt_to_phys(addr); + agp_bridge->scratch_page = + agp_bridge->mask_memory(agp_bridge->scratch_page_real, 0); } - size_value = agp_bridge.fetch_size(); + size_value = agp_bridge->fetch_size(); if (size_value == 0) { printk(KERN_ERR PFX "unable to determine aperture size.\n"); rc = -EINVAL; goto err_out; } - if (agp_bridge.create_gatt_table()) { + if (agp_bridge->create_gatt_table()) { printk(KERN_ERR PFX "unable to get memory for graphics translation table.\n"); rc = -ENOMEM; goto err_out; } got_gatt = 1; - agp_bridge.key_list = vmalloc(PAGE_SIZE * 4); - if (agp_bridge.key_list == NULL) { + agp_bridge->key_list = vmalloc(PAGE_SIZE * 4); + if (agp_bridge->key_list == NULL) { printk(KERN_ERR PFX "error allocating memory for key lists.\n"); rc = -ENOMEM; goto err_out; @@ -153,27 +155,27 @@ got_keylist = 1; /* FIXME vmalloc'd memory not guaranteed contiguous */ - memset(agp_bridge.key_list, 0, PAGE_SIZE * 4); + memset(agp_bridge->key_list, 0, PAGE_SIZE * 4); - if (agp_bridge.configure()) { + if (agp_bridge->configure()) { printk(KERN_ERR PFX "error configuring host chipset.\n"); rc = -EINVAL; goto err_out; } printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n", - size_value, agp_bridge.gart_bus_addr); + size_value, agp_bridge->gart_bus_addr); return 0; err_out: - if (agp_bridge.needs_scratch_page == TRUE) { - agp_bridge.agp_destroy_page(phys_to_virt(agp_bridge.scratch_page_real)); + if (agp_bridge->needs_scratch_page == TRUE) { + agp_bridge->agp_destroy_page(phys_to_virt(agp_bridge->scratch_page_real)); } if (got_gatt) - agp_bridge.free_gatt_table(); + agp_bridge->free_gatt_table(); if (got_keylist) - vfree(agp_bridge.key_list); + vfree(agp_bridge->key_list); return rc; } @@ -181,13 +183,16 @@ /* cannot be __exit b/c as it could be called from __init code */ static void agp_backend_cleanup(void) { - agp_bridge.cleanup(); - agp_bridge.free_gatt_table(); - vfree(agp_bridge.key_list); - - if (agp_bridge.needs_scratch_page == TRUE) { - 
agp_bridge.agp_destroy_page(phys_to_virt(agp_bridge.scratch_page_real)); - } + if (agp_bridge->cleanup != NULL) + agp_bridge->cleanup(); + if (agp_bridge->free_gatt_table != NULL) + agp_bridge->free_gatt_table(); + if (agp_bridge->key_list) + vfree(agp_bridge->key_list); + + if ((agp_bridge->agp_destroy_page!=NULL) && + (agp_bridge->needs_scratch_page == TRUE)) + agp_bridge->agp_destroy_page(phys_to_virt(agp_bridge->scratch_page_real)); } static int agp_power(struct pm_dev *dev, pm_request_t rq, void *data) @@ -195,9 +200,9 @@ switch(rq) { case PM_SUSPEND: - return agp_bridge.suspend(); + return agp_bridge->suspend(); case PM_RESUME: - agp_bridge.resume(); + agp_bridge->resume(); return 0; } return 0; @@ -248,21 +253,25 @@ /* FIXME: What to do with this? */ inter_module_register("drm_agp", THIS_MODULE, &drm_agp); - pm_register(PM_PCI_DEV, PM_PCI_ID(agp_bridge.dev), agp_power); + pm_register(PM_PCI_DEV, PM_PCI_ID(agp_bridge->dev), agp_power); agp_count++; return 0; frontend_err: agp_backend_cleanup(); err_out: - agp_bridge.type = NOT_SUPPORTED; + agp_bridge->type = NOT_SUPPORTED; module_put(drv->owner); + drv->dev = NULL; return ret_val; } int agp_unregister_driver(struct agp_driver *drv) { - agp_bridge.type = NOT_SUPPORTED; + if (drv->dev==NULL) + return -ENODEV; + + agp_bridge->type = NOT_SUPPORTED; pm_unregister_all(agp_power); agp_frontend_cleanup(); agp_backend_cleanup(); @@ -282,8 +291,8 @@ already_initialised = 1; - memset(&agp_bridge, 0, sizeof(struct agp_bridge_data)); - agp_bridge.type = NOT_SUPPORTED; + memset(agp_bridge, 0, sizeof(struct agp_bridge_data)); + agp_bridge->type = NOT_SUPPORTED; printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Dave Jones\n", AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR); diff -Nru a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c --- a/drivers/char/agp/frontend.c Wed Feb 12 14:47:57 2003 +++ b/drivers/char/agp/frontend.c Wed Feb 12 14:47:57 2003 @@ -97,7 +97,9 @@ int size, pgprot_t page_prot) { agp_segment_priv *seg; - int num_segments, pg_start, pg_count, i; + int num_segments, i; + off_t pg_start; + size_t pg_count; pg_start = offset / 4096; pg_count = size / 4096; @@ -174,7 +176,7 @@ agp_segment_priv **ret_seg; agp_segment_priv *seg; agp_segment *user_seg; - int i; + size_t i; seg = kmalloc((sizeof(agp_segment_priv) * region->seg_count), GFP_KERNEL); if (seg == NULL) { @@ -578,8 +580,7 @@ static int agp_mmap(struct file *file, struct vm_area_struct *vma) { - int size; - int current_size; + unsigned int size, current_size; unsigned long offset; agp_client *client; agp_file_private *priv = (agp_file_private *) file->private_data; @@ -611,8 +612,11 @@ if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot)) goto out_inval; - if (remap_page_range(vma, vma->vm_start, (kerninfo.aper_base + offset), - size, vma->vm_page_prot)) { + if (kerninfo.vm_ops) { + vma->vm_ops = kerninfo.vm_ops; + } else if (remap_page_range(vma, vma->vm_start, + (kerninfo.aper_base + offset), + size, vma->vm_page_prot)) { goto out_again; } AGP_UNLOCK(); @@ -623,8 +627,11 @@ if (size != current_size) goto out_inval; - if (remap_page_range(vma, vma->vm_start, kerninfo.aper_base, - size, vma->vm_page_prot)) { + if (kerninfo.vm_ops) { + vma->vm_ops = kerninfo.vm_ops; + } else if (remap_page_range(vma, vma->vm_start, + kerninfo.aper_base, + size, vma->vm_page_prot)) { goto out_again; } AGP_UNLOCK(); diff -Nru a/drivers/char/agp/generic-3.0.c b/drivers/char/agp/generic-3.0.c --- a/drivers/char/agp/generic-3.0.c Wed Feb 12 14:47:57 2003 +++ 
b/drivers/char/agp/generic-3.0.c Wed Feb 12 14:47:57 2003 @@ -77,7 +77,7 @@ struct agp_3_0_dev *dev; }; - struct pci_dev *td = agp_bridge.dev, *dev; + struct pci_dev *td = agp_bridge->dev, *dev; struct list_head *head = &dev_list->list, *pos; struct agp_3_0_dev *cur; struct isoch_data *master, target; @@ -117,8 +117,8 @@ if((ret = agp_3_0_dev_list_sort(dev_list, ndevs)) != 0) goto free_and_exit; - pci_read_config_dword(td, agp_bridge.capndx + 0x0c, &tnistat); - pci_read_config_dword(td, agp_bridge.capndx + 0x04, &tstatus); + pci_read_config_dword(td, agp_bridge->capndx + 0x0c, &tnistat); + pci_read_config_dword(td, agp_bridge->capndx + 0x04, &tstatus); /* Extract power-on defaults from the target */ target.maxbw = (tnistat >> 16) & 0xff; @@ -170,13 +170,13 @@ * in the target's NISTAT register, so we need to do this now * to get an accurate value for ISOCH_N later. */ - pci_read_config_word(td, agp_bridge.capndx + 0x20, &tnicmd); + pci_read_config_word(td, agp_bridge->capndx + 0x20, &tnicmd); tnicmd &= ~(0x3 << 6); tnicmd |= target.y << 6; - pci_write_config_word(td, agp_bridge.capndx + 0x20, tnicmd); + pci_write_config_word(td, agp_bridge->capndx + 0x20, tnicmd); /* Reread the target's ISOCH_N */ - pci_read_config_dword(td, agp_bridge.capndx + 0x0c, &tnistat); + pci_read_config_dword(td, agp_bridge->capndx + 0x0c, &tnistat); target.n = (tnistat >> 8) & 0xff; /* Calculate the minimum ISOCH_N needed by each master */ @@ -296,7 +296,7 @@ u32 trq, mrq, rem; unsigned int cdev = 0; - pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx + 0x04, &tstatus); + pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + 0x04, &tstatus); trq = (tstatus >> 24) & 0xff; mrq = trq / ndevs; @@ -321,7 +321,7 @@ */ static int agp_3_0_node_enable(u32 mode, u32 minor) { - struct pci_dev *td = agp_bridge.dev, *dev; + struct pci_dev *td = agp_bridge->dev, *dev; u8 bus_num, mcapndx; u32 isoch, arqsz, cal_cycle, tmp, rate; u32 tstatus, tcmd, mcmd, mstatus, ncapid; @@ -364,7 +364,7 @@ } /* Extract some power-on defaults from the target */ - pci_read_config_dword(td, agp_bridge.capndx + 0x04, &tstatus); + pci_read_config_dword(td, agp_bridge->capndx + 0x04, &tstatus); isoch = (tstatus >> 17) & 0x1; arqsz = (tstatus >> 13) & 0x7; cal_cycle = (tstatus >> 10) & 0x7; @@ -470,7 +470,7 @@ * Also set the AGP_ENABLE bit, effectively 'turning on' the * target (this has to be done _before_ turning on the masters). */ - pci_read_config_dword(td, agp_bridge.capndx + 0x08, &tcmd); + pci_read_config_dword(td, agp_bridge->capndx + 0x08, &tcmd); tcmd &= ~(0x7 << 10); tcmd &= ~0x7; @@ -479,7 +479,7 @@ tcmd |= 0x1 << 8; tcmd |= rate; - pci_write_config_dword(td, agp_bridge.capndx + 0x08, tcmd); + pci_write_config_dword(td, agp_bridge->capndx + 0x08, tcmd); /* * Set the target's advertised arqsz value, the minimum supported @@ -525,11 +525,11 @@ * (AGP 3.0 devices are required to operate as AGP 2.0 devices * when not using 3.0 electricals. 
*/ -int agp_generic_agp_3_0_enable(u32 mode) +void agp_generic_agp_3_0_enable(u32 mode) { u32 ncapid, major, minor, agp_3_0; - pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx, &ncapid); + pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx, &ncapid); major = (ncapid >> 20) & 0xf; minor = (ncapid >> 16) & 0xf; @@ -537,16 +537,13 @@ printk(KERN_INFO PFX "Found an AGP %d.%d compliant device.\n",major, minor); if(major >= 3) { - pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx + 0x4, &agp_3_0); + pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + 0x4, &agp_3_0); /* * Check to see if we are operating in 3.0 mode */ - if((agp_3_0 >> 3) & 0x1) { + if((agp_3_0 >> 3) & 0x1) agp_3_0_node_enable(mode, minor); - return TRUE; - } } - return FALSE; } EXPORT_SYMBOL(agp_generic_agp_3_0_enable); diff -Nru a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c --- a/drivers/char/agp/generic.c Wed Feb 12 14:47:57 2003 +++ b/drivers/char/agp/generic.c Wed Feb 12 14:47:57 2003 @@ -52,16 +52,16 @@ return; if (key < MAXKEY) - clear_bit(key, agp_bridge.key_list); + clear_bit(key, agp_bridge->key_list); } static int agp_get_key(void) { int bit; - bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY); + bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY); if (bit < MAXKEY) { - set_bit(bit, agp_bridge.key_list); + set_bit(bit, agp_bridge->key_list); return bit; } return -1; @@ -96,21 +96,21 @@ void agp_free_memory(agp_memory * curr) { - int i; + size_t i; - if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL)) + if ((agp_bridge->type == NOT_SUPPORTED) || (curr == NULL)) return; if (curr->is_bound == TRUE) agp_unbind_memory(curr); if (curr->type != 0) { - agp_bridge.free_by_type(curr); + agp_bridge->free_by_type(curr); return; } if (curr->page_count != 0) { for (i = 0; i < curr->page_count; i++) { - agp_bridge.agp_destroy_page(phys_to_virt(curr->memory[i])); + agp_bridge->agp_destroy_page(phys_to_virt(curr->memory[i])); } } agp_free_key(curr->key); @@ -124,16 +124,16 @@ { int scratch_pages; agp_memory *new; - int i; + size_t i; - if (agp_bridge.type == NOT_SUPPORTED) + if (agp_bridge->type == NOT_SUPPORTED) return NULL; - if ((atomic_read(&agp_bridge.current_memory_agp) + page_count) > agp_bridge.max_memory_agp) + if ((atomic_read(&agp_bridge->current_memory_agp) + page_count) > agp_bridge->max_memory_agp) return NULL; if (type != 0) { - new = agp_bridge.alloc_by_type(page_count, type); + new = agp_bridge->alloc_by_type(page_count, type); return new; } @@ -145,7 +145,7 @@ return NULL; for (i = 0; i < page_count; i++) { - void *addr = agp_bridge.agp_alloc_page(); + void *addr = agp_bridge->agp_alloc_page(); if (addr == NULL) { agp_free_memory(new); @@ -167,9 +167,9 @@ int current_size; void *temp; - temp = agp_bridge.current_size; + temp = agp_bridge->current_size; - switch (agp_bridge.size_type) { + switch (agp_bridge->size_type) { case U8_APER_SIZE: current_size = A_SIZE_8(temp)->size; break; @@ -201,9 +201,9 @@ int num_entries; void *temp; - temp = agp_bridge.current_size; + temp = agp_bridge->current_size; - switch (agp_bridge.size_type) { + switch (agp_bridge->size_type) { case U8_APER_SIZE: num_entries = A_SIZE_8(temp)->num_entries; break; @@ -235,20 +235,21 @@ int agp_copy_info(agp_kern_info * info) { memset(info, 0, sizeof(agp_kern_info)); - if (agp_bridge.type == NOT_SUPPORTED) { - info->chipset = agp_bridge.type; + if (agp_bridge->type == NOT_SUPPORTED) { + info->chipset = agp_bridge->type; return -EIO; } - info->version.major = agp_bridge.version->major; - 
info->version.minor = agp_bridge.version->minor; - info->device = agp_bridge.dev; - info->chipset = agp_bridge.type; - info->mode = agp_bridge.mode; - info->aper_base = agp_bridge.gart_bus_addr; + info->version.major = agp_bridge->version->major; + info->version.minor = agp_bridge->version->minor; + info->device = agp_bridge->dev; + info->chipset = agp_bridge->type; + info->mode = agp_bridge->mode; + info->aper_base = agp_bridge->gart_bus_addr; info->aper_size = agp_return_size(); - info->max_memory = agp_bridge.max_memory_agp; - info->current_memory = atomic_read(&agp_bridge.current_memory_agp); - info->cant_use_aperture = agp_bridge.cant_use_aperture; + info->max_memory = agp_bridge->max_memory_agp; + info->current_memory = atomic_read(&agp_bridge->current_memory_agp); + info->cant_use_aperture = agp_bridge->cant_use_aperture; + info->vm_ops = agp_bridge->vm_ops; info->page_mask = ~0UL; return 0; } @@ -265,7 +266,7 @@ { int ret_val; - if ((agp_bridge.type == NOT_SUPPORTED) || + if ((agp_bridge->type == NOT_SUPPORTED) || (curr == NULL) || (curr->is_bound == TRUE)) { return -EINVAL; } @@ -273,7 +274,7 @@ CACHE_FLUSH(); curr->is_flushed = TRUE; } - ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type); + ret_val = agp_bridge->insert_memory(curr, pg_start, curr->type); if (ret_val != 0) return ret_val; @@ -287,13 +288,13 @@ { int ret_val; - if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL)) + if ((agp_bridge->type == NOT_SUPPORTED) || (curr == NULL)) return -EINVAL; if (curr->is_bound != TRUE) return -EINVAL; - ret_val = agp_bridge.remove_memory(curr, curr->pg_start, curr->type); + ret_val = agp_bridge->remove_memory(curr, curr->pg_start, curr->type); if (ret_val != 0) return ret_val; @@ -395,15 +396,15 @@ { u32 command; - pci_read_config_dword(agp_bridge.dev, - agp_bridge.capndx + PCI_AGP_STATUS, + pci_read_config_dword(agp_bridge->dev, + agp_bridge->capndx + PCI_AGP_STATUS, &command); command = agp_collect_device_status(mode, command); command |= 0x100; - pci_write_config_dword(agp_bridge.dev, - agp_bridge.capndx + PCI_AGP_COMMAND, + pci_write_config_dword(agp_bridge->dev, + agp_bridge->capndx + PCI_AGP_COMMAND, command); agp_device_command(command, 0); @@ -421,17 +422,17 @@ struct page *page; /* The generic routines can't handle 2 level gatt's */ - if (agp_bridge.size_type == LVL2_APER_SIZE) + if (agp_bridge->size_type == LVL2_APER_SIZE) return -EINVAL; table = NULL; - i = agp_bridge.aperture_size_idx; - temp = agp_bridge.current_size; + i = agp_bridge->aperture_size_idx; + temp = agp_bridge->current_size; size = page_order = num_entries = 0; - if (agp_bridge.size_type != FIXED_APER_SIZE) { + if (agp_bridge->size_type != FIXED_APER_SIZE) { do { - switch (agp_bridge.size_type) { + switch (agp_bridge->size_type) { case U8_APER_SIZE: size = A_SIZE_8(temp)->size; page_order = @@ -462,15 +463,15 @@ if (table == NULL) { i++; - switch (agp_bridge.size_type) { + switch (agp_bridge->size_type) { case U8_APER_SIZE: - agp_bridge.current_size = A_IDX8(); + agp_bridge->current_size = A_IDX8(); break; case U16_APER_SIZE: - agp_bridge.current_size = A_IDX16(); + agp_bridge->current_size = A_IDX16(); break; case U32_APER_SIZE: - agp_bridge.current_size = A_IDX32(); + agp_bridge->current_size = A_IDX32(); break; /* This case will never really * happen. 
@@ -478,15 +479,15 @@ case FIXED_APER_SIZE: case LVL2_APER_SIZE: default: - agp_bridge.current_size = - agp_bridge.current_size; + agp_bridge->current_size = + agp_bridge->current_size; break; } - temp = agp_bridge.current_size; + temp = agp_bridge->current_size; } else { - agp_bridge.aperture_size_idx = i; + agp_bridge->aperture_size_idx = i; } - } while ((table == NULL) && (i < agp_bridge.num_aperture_sizes)); + } while ((table == NULL) && (i < agp_bridge->num_aperture_sizes)); } else { size = ((struct aper_size_info_fixed *) temp)->size; page_order = ((struct aper_size_info_fixed *) temp)->page_order; @@ -502,14 +503,14 @@ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) SetPageReserved(page); - agp_bridge.gatt_table_real = (u32 *) table; + agp_bridge->gatt_table_real = (u32 *) table; agp_gatt_table = (void *)table; CACHE_FLUSH(); - agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table), + agp_bridge->gatt_table = ioremap_nocache(virt_to_phys(table), (PAGE_SIZE * (1 << page_order))); CACHE_FLUSH(); - if (agp_bridge.gatt_table == NULL) { + if (agp_bridge->gatt_table == NULL) { for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) ClearPageReserved(page); @@ -517,11 +518,11 @@ return -ENOMEM; } - agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real); + agp_bridge->gatt_bus_addr = virt_to_phys(agp_bridge->gatt_table_real); /* AK: bogus, should encode addresses > 4GB */ for (i = 0; i < num_entries; i++) - agp_bridge.gatt_table[i] = (unsigned long) agp_bridge.scratch_page; + agp_bridge->gatt_table[i] = (unsigned long) agp_bridge->scratch_page; return 0; } @@ -543,9 +544,9 @@ void *temp; struct page *page; - temp = agp_bridge.current_size; + temp = agp_bridge->current_size; - switch (agp_bridge.size_type) { + switch (agp_bridge->size_type) { case U8_APER_SIZE: page_order = A_SIZE_8(temp)->page_order; break; @@ -572,25 +573,27 @@ * from the table. 
*/ - iounmap(agp_bridge.gatt_table); - table = (char *) agp_bridge.gatt_table_real; + iounmap(agp_bridge->gatt_table); + table = (char *) agp_bridge->gatt_table_real; table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) ClearPageReserved(page); - free_pages((unsigned long) agp_bridge.gatt_table_real, page_order); + free_pages((unsigned long) agp_bridge->gatt_table_real, page_order); return 0; } int agp_generic_insert_memory(agp_memory * mem, off_t pg_start, int type) { - int i, j, num_entries; + int num_entries; + size_t i; + off_t j; void *temp; - temp = agp_bridge.current_size; + temp = agp_bridge->current_size; - switch (agp_bridge.size_type) { + switch (agp_bridge->size_type) { case U8_APER_SIZE: num_entries = A_SIZE_8(temp)->num_entries; break; @@ -627,7 +630,7 @@ j = pg_start; while (j < (pg_start + mem->page_count)) { - if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { + if (!PGE_EMPTY(agp_bridge->gatt_table[j])) { return -EBUSY; } j++; @@ -639,16 +642,16 @@ } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) - agp_bridge.gatt_table[j] = - agp_bridge.mask_memory(mem->memory[i], mem->type); + agp_bridge->gatt_table[j] = + agp_bridge->mask_memory(mem->memory[i], mem->type); - agp_bridge.tlb_flush(mem); + agp_bridge->tlb_flush(mem); return 0; } int agp_generic_remove_memory(agp_memory * mem, off_t pg_start, int type) { - int i; + size_t i; if (type != 0 || mem->type != 0) { /* The generic routines know nothing of memory types */ @@ -657,11 +660,11 @@ /* AK: bogus, should encode addresses > 4GB */ for (i = pg_start; i < (mem->page_count + pg_start); i++) { - agp_bridge.gatt_table[i] = - (unsigned long) agp_bridge.scratch_page; + agp_bridge->gatt_table[i] = + (unsigned long) agp_bridge->scratch_page; } - agp_bridge.tlb_flush(mem); + agp_bridge->tlb_flush(mem); return 0; } @@ -700,7 +703,7 @@ get_page(page); SetPageLocked(page); - atomic_inc(&agp_bridge.current_memory_agp); + atomic_inc(&agp_bridge->current_memory_agp); return page_address(page); } @@ -716,16 +719,16 @@ put_page(page); unlock_page(page); free_page((unsigned long)addr); - atomic_dec(&agp_bridge.current_memory_agp); + atomic_dec(&agp_bridge->current_memory_agp); } /* End Basic Page Allocation Routines */ void agp_enable(u32 mode) { - if (agp_bridge.type == NOT_SUPPORTED) + if (agp_bridge->type == NOT_SUPPORTED) return; - agp_bridge.agp_enable(mode); + agp_bridge->agp_enable(mode); } EXPORT_SYMBOL(agp_free_memory); diff -Nru a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c --- a/drivers/char/agp/hp-agp.c Wed Feb 12 14:47:58 2003 +++ b/drivers/char/agp/hp-agp.c Wed Feb 12 14:47:58 2003 @@ -176,7 +176,7 @@ size = hp_private.gart_size / MB(1); hp_zx1_sizes[0].size = size; - agp_bridge.current_size = (void *) &hp_zx1_sizes[0]; + agp_bridge->current_size = (void *) &hp_zx1_sizes[0]; return size; } @@ -184,10 +184,10 @@ { struct _hp_private *hp = &hp_private; - agp_bridge.gart_bus_addr = hp->gart_base; - agp_bridge.capndx = pci_find_capability(agp_bridge.dev, PCI_CAP_ID_AGP); - pci_read_config_dword(agp_bridge.dev, - agp_bridge.capndx + PCI_AGP_STATUS, &agp_bridge.mode); + agp_bridge->gart_bus_addr = hp->gart_base; + agp_bridge->capndx = pci_find_capability(agp_bridge->dev, PCI_CAP_ID_AGP); + pci_read_config_dword(agp_bridge->dev, + agp_bridge->capndx + PCI_AGP_STATUS, &agp_bridge->mode); if (hp->io_pdir_owner) { OUTREG64(hp->registers, HP_ZX1_PDIR_BASE, @@ -241,7 +241,7 @@ } for (i = 0; i < hp->gatt_entries; i++) { - hp->gatt[i] = (unsigned 
long) agp_bridge.scratch_page; + hp->gatt[i] = (unsigned long) agp_bridge->scratch_page; } return 0; @@ -296,11 +296,11 @@ for (k = 0; k < hp->io_pages_per_kpage; k++, j++, paddr += hp->io_page_size) { - hp->gatt[j] = agp_bridge.mask_memory(paddr, type); + hp->gatt[j] = agp_bridge->mask_memory(paddr, type); } } - agp_bridge.tlb_flush(mem); + agp_bridge->tlb_flush(mem); return 0; } @@ -316,10 +316,10 @@ io_pg_start = hp->io_pages_per_kpage * pg_start; io_pg_count = hp->io_pages_per_kpage * mem->page_count; for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) { - hp->gatt[i] = agp_bridge.scratch_page; + hp->gatt[i] = agp_bridge->scratch_page; } - agp_bridge.tlb_flush(mem); + agp_bridge->tlb_flush(mem); return 0; } @@ -330,46 +330,46 @@ static int __init hp_zx1_setup (struct pci_dev *pdev __attribute__((unused))) { - agp_bridge.masks = hp_zx1_masks; - agp_bridge.dev_private_data = NULL; - agp_bridge.size_type = FIXED_APER_SIZE; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = hp_zx1_configure; - agp_bridge.fetch_size = hp_zx1_fetch_size; - agp_bridge.cleanup = hp_zx1_cleanup; - agp_bridge.tlb_flush = hp_zx1_tlbflush; - agp_bridge.mask_memory = hp_zx1_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = hp_zx1_create_gatt_table; - agp_bridge.free_gatt_table = hp_zx1_free_gatt_table; - agp_bridge.insert_memory = hp_zx1_insert_memory; - agp_bridge.remove_memory = hp_zx1_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.cant_use_aperture = 1; + agp_bridge->masks = hp_zx1_masks; + agp_bridge->dev_private_data = NULL; + agp_bridge->size_type = FIXED_APER_SIZE; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->configure = hp_zx1_configure; + agp_bridge->fetch_size = hp_zx1_fetch_size; + agp_bridge->cleanup = hp_zx1_cleanup; + agp_bridge->tlb_flush = hp_zx1_tlbflush; + agp_bridge->mask_memory = hp_zx1_mask_memory; + agp_bridge->agp_enable = agp_generic_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = hp_zx1_create_gatt_table; + agp_bridge->free_gatt_table = hp_zx1_free_gatt_table; + agp_bridge->insert_memory = hp_zx1_insert_memory; + agp_bridge->remove_memory = hp_zx1_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->cant_use_aperture = 1; return hp_zx1_ioc_init(); } static int __init agp_find_supported_device(struct pci_dev *dev) { - agp_bridge.dev = dev; + agp_bridge->dev = dev; /* ZX1 LBAs can be either PCI or AGP bridges */ if (pci_find_capability(dev, PCI_CAP_ID_AGP)) { printk(KERN_INFO PFX "Detected HP ZX1 AGP chipset at %s\n", dev->slot_name); - agp_bridge.type = HP_ZX1; - agp_bridge.dev = dev; + agp_bridge->type = HP_ZX1; + agp_bridge->dev = dev; return hp_zx1_setup(dev); } return -ENODEV; } static struct agp_driver hp_agp_driver = { - .owner = THIS_MODULE; + .owner = THIS_MODULE, }; static int __init agp_hp_probe (struct pci_dev *dev, const struct pci_device_id *ent) @@ -408,7 +408,7 @@ ret_val = pci_module_init(&agp_hp_pci_driver); if (ret_val) - agp_bridge.type = NOT_SUPPORTED; + agp_bridge->type = NOT_SUPPORTED; return ret_val; } 
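Note on the pattern running through these driver hunks: the backend.c change replaces the exported static "struct agp_bridge_data agp_bridge" with a pointer that initially refers to a NOT_SUPPORTED dummy instance, so every chipset driver now dereferences agp_bridge->... and agp_backend_cleanup() can test each hook for NULL before calling it. Below is a minimal sketch of that shape only, using hypothetical names (bridge_ops, bridge, bridge_cleanup) rather than the real agpgart symbols.

/* Sketch of the static-struct-to-pointer conversion, not the real agpgart API. */
struct bridge_ops {
	int type;                      /* stays "not supported" until a driver claims the bridge */
	void (*cleanup)(void);         /* hooks may be left NULL by the dummy instance */
	int (*free_gatt_table)(void);
};

static struct bridge_ops bridge_dummy = { .type = 0 };   /* 0 standing in for NOT_SUPPORTED */
static struct bridge_ops *bridge = &bridge_dummy;

static void bridge_cleanup(void)
{
	/* Guard every hook, as the reworked agp_backend_cleanup() above does,
	 * so tearing down an unclaimed (dummy) bridge is safe. */
	if (bridge->cleanup)
		bridge->cleanup();
	if (bridge->free_gatt_table)
		bridge->free_gatt_table();
}

With the pointer in place, a chipset driver claims the bridge by filling in the pointed-to structure's fields, which is exactly what the repeated agp_bridge->field assignments in the setup routines above and below do.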
diff -Nru a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c --- a/drivers/char/agp/i460-agp.c Wed Feb 12 14:47:58 2003 +++ b/drivers/char/agp/i460-agp.c Wed Feb 12 14:47:58 2003 @@ -96,7 +96,7 @@ struct aper_size_info_8 *values; /* Determine the GART page size */ - pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &temp); + pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &temp); i460.io_page_shift = (temp & I460_4M_PS) ? 22 : 12; pr_debug("i460_fetch_size: io_page_shift=%d\n", i460.io_page_shift); @@ -107,9 +107,9 @@ return 0; } - values = A_SIZE_8(agp_bridge.aperture_sizes); + values = A_SIZE_8(agp_bridge->aperture_sizes); - pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp); + pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp); /* Exit now if the IO drivers for the GART SRAMS are turned off */ if (temp & I460_SRAM_IO_DISABLE) { @@ -130,7 +130,7 @@ else i460.dynamic_apbase = INTEL_I460_APBASE; - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + for (i = 0; i < agp_bridge->num_aperture_sizes; i++) { /* * Dynamically calculate the proper num_entries and page_order values for * the define aperture sizes. Take care not to shift off the end of @@ -140,11 +140,11 @@ values[i].page_order = log2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT); } - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + for (i = 0; i < agp_bridge->num_aperture_sizes; i++) { /* Neglect control bits when matching up size_value */ if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) { - agp_bridge.previous_size = agp_bridge.current_size = (void *) (values + i); - agp_bridge.aperture_size_idx = i; + agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); + agp_bridge->aperture_size_idx = i; return values[i].size; } } @@ -166,8 +166,8 @@ { u8 temp; - pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp); - pci_write_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, + pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp); + pci_write_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, ((temp & ~I460_AGPSIZ_MASK) | size_value)); } @@ -175,7 +175,7 @@ { struct aper_size_info_8 *previous_size; - previous_size = A_SIZE_8(agp_bridge.previous_size); + previous_size = A_SIZE_8(agp_bridge->previous_size); i460_write_agpsiz(previous_size->size_value); if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) @@ -194,7 +194,7 @@ temp.large = 0; - current_size = A_SIZE_8(agp_bridge.current_size); + current_size = A_SIZE_8(agp_bridge->current_size); i460_write_agpsiz(current_size->size_value); /* @@ -202,14 +202,14 @@ * This has to be done since the AGP aperture can be above 4GB on * 460 based systems. 
*/ - pci_read_config_dword(agp_bridge.dev, i460.dynamic_apbase, &(temp.small[0])); - pci_read_config_dword(agp_bridge.dev, i460.dynamic_apbase + 4, &(temp.small[1])); + pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase, &(temp.small[0])); + pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase + 4, &(temp.small[1])); /* Clear BAR control bits */ - agp_bridge.gart_bus_addr = temp.large & ~((1UL << 3) - 1); + agp_bridge->gart_bus_addr = temp.large & ~((1UL << 3) - 1); - pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &scratch); - pci_write_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, + pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &scratch); + pci_write_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC); /* @@ -234,16 +234,16 @@ /* * Load up the fixed address of the GART SRAMS which hold our GATT table. */ - temp = agp_bridge.current_size; + temp = agp_bridge->current_size; page_order = A_SIZE_8(temp)->page_order; num_entries = A_SIZE_8(temp)->num_entries; i460.gatt = ioremap(INTEL_I460_ATTBASE, PAGE_SIZE << page_order); /* These are no good, the should be removed from the agp_bridge strucure... */ - agp_bridge.gatt_table_real = NULL; - agp_bridge.gatt_table = NULL; - agp_bridge.gatt_bus_addr = 0; + agp_bridge->gatt_table_real = NULL; + agp_bridge->gatt_table = NULL; + agp_bridge->gatt_bus_addr = 0; for (i = 0; i < num_entries; ++i) WR_GATT(i, 0); @@ -256,7 +256,7 @@ int num_entries, i; void *temp; - temp = agp_bridge.current_size; + temp = agp_bridge->current_size; num_entries = A_SIZE_8(temp)->num_entries; @@ -284,7 +284,7 @@ io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start; - temp = agp_bridge.current_size; + temp = agp_bridge->current_size; num_entries = A_SIZE_8(temp)->num_entries; if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) { @@ -306,7 +306,7 @@ for (i = 0, j = io_pg_start; i < mem->page_count; i++) { paddr = mem->memory[i]; for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size) - WR_GATT(j, agp_bridge.mask_memory(paddr, mem->type)); + WR_GATT(j, agp_bridge->mask_memory(paddr, mem->type)); } WR_FLUSH_GATT(j - 1); return 0; @@ -364,7 +364,7 @@ lp->paddr = virt_to_phys(lpage); lp->refcount = 0; - atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge.current_memory_agp); + atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); return 0; } @@ -374,7 +374,7 @@ lp->alloced_map = NULL; free_pages((unsigned long) phys_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT); - atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge.current_memory_agp); + atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); } static int i460_insert_memory_large_io_page (agp_memory * mem, off_t pg_start, int type) @@ -383,7 +383,7 @@ struct lp_desc *start, *end, *lp; void *temp; - temp = agp_bridge.current_size; + temp = agp_bridge->current_size; num_entries = A_SIZE_8(temp)->num_entries; /* Figure out what pg_start means in terms of our large GART pages */ @@ -417,7 +417,7 @@ if (i460_alloc_large_page(lp) < 0) return -ENOMEM; pg = lp - i460.lp_desc; - WR_GATT(pg, agp_bridge.mask_memory(lp->paddr, 0)); + WR_GATT(pg, agp_bridge->mask_memory(lp->paddr, 0)); WR_FLUSH_GATT(pg); } @@ -439,7 +439,7 @@ struct lp_desc *start, *end, *lp; void *temp; - temp = agp_bridge.current_size; + temp = agp_bridge->current_size; num_entries = A_SIZE_8(temp)->num_entries; /* Figure out what pg_start means in terms of our large GART pages */ @@ -519,43 +519,43 @@ static unsigned long 
i460_mask_memory (unsigned long addr, int type) { /* Make sure the returned address is a valid GATT entry */ - return (agp_bridge.masks[0].mask + return (agp_bridge->masks[0].mask | (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xffffff000) >> 12)); } static int __init intel_i460_setup (struct pci_dev *pdev __attribute__((unused))) { - agp_bridge.masks = i460_masks; - agp_bridge.aperture_sizes = (void *) i460_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 3; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = i460_configure; - agp_bridge.fetch_size = i460_fetch_size; - agp_bridge.cleanup = i460_cleanup; - agp_bridge.tlb_flush = i460_tlb_flush; - agp_bridge.mask_memory = i460_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = i460_create_gatt_table; - agp_bridge.free_gatt_table = i460_free_gatt_table; + agp_bridge->masks = i460_masks; + agp_bridge->aperture_sizes = (void *) i460_sizes; + agp_bridge->size_type = U8_APER_SIZE; + agp_bridge->num_aperture_sizes = 3; + agp_bridge->dev_private_data = NULL; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->configure = i460_configure; + agp_bridge->fetch_size = i460_fetch_size; + agp_bridge->cleanup = i460_cleanup; + agp_bridge->tlb_flush = i460_tlb_flush; + agp_bridge->mask_memory = i460_mask_memory; + agp_bridge->agp_enable = agp_generic_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = i460_create_gatt_table; + agp_bridge->free_gatt_table = i460_free_gatt_table; #if I460_LARGE_IO_PAGES - agp_bridge.insert_memory = i460_insert_memory; - agp_bridge.remove_memory = i460_remove_memory; - agp_bridge.agp_alloc_page = i460_alloc_page; - agp_bridge.agp_destroy_page = i460_destroy_page; + agp_bridge->insert_memory = i460_insert_memory; + agp_bridge->remove_memory = i460_remove_memory; + agp_bridge->agp_alloc_page = i460_alloc_page; + agp_bridge->agp_destroy_page = i460_destroy_page; #else - agp_bridge.insert_memory = i460_insert_memory_small_io_page; - agp_bridge.remove_memory = i460_remove_memory_small_io_page; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge->insert_memory = i460_insert_memory_small_io_page; + agp_bridge->remove_memory = i460_remove_memory_small_io_page; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; #endif - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 1; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = agp_generic_resume; + agp_bridge->cant_use_aperture = 1; return 0; } @@ -571,8 +571,8 @@ if (cap_ptr == 0) return -ENODEV; - agp_bridge.dev = dev; - agp_bridge.capndx = cap_ptr; + agp_bridge->dev = dev; + agp_bridge->capndx = cap_ptr; intel_i460_setup(dev); i460_agp_driver.dev = dev; agp_register_driver(&i460_agp_driver); @@ -605,7 +605,7 @@ ret_val = pci_module_init(&agp_intel_i460_pci_driver); if (ret_val) - agp_bridge.type = NOT_SUPPORTED; + agp_bridge->type = NOT_SUPPORTED; return ret_val; } diff -Nru a/drivers/char/agp/i7x05-agp.c b/drivers/char/agp/i7x05-agp.c 
--- a/drivers/char/agp/i7x05-agp.c Wed Feb 12 14:47:57 2003 +++ b/drivers/char/agp/i7x05-agp.c Wed Feb 12 14:47:57 2003 @@ -13,16 +13,16 @@ /* * For AGP 3.0 APSIZE is now 16 bits */ - pci_read_config_word (agp_bridge.dev, INTEL_I7505_APSIZE, &tmp); + pci_read_config_word (agp_bridge->dev, INTEL_I7505_APSIZE, &tmp); tmp = (tmp & 0xfff); - values = A_SIZE_16(agp_bridge.aperture_sizes); + values = A_SIZE_16(agp_bridge->aperture_sizes); - for (i=0; i < agp_bridge.num_aperture_sizes; i++) { + for (i=0; i < agp_bridge->num_aperture_sizes; i++) { if (tmp == values[i].size_value) { - agp_bridge.previous_size = agp_bridge.current_size = + agp_bridge->previous_size = agp_bridge->current_size = (void *)(values + i); - agp_bridge.aperture_size_idx = i; + agp_bridge->aperture_size_idx = i; return values[i].size; } } @@ -33,18 +33,18 @@ static void intel_7505_tlbflush(agp_memory *mem) { u32 temp; - pci_read_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, &temp); - pci_write_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, temp & ~(1 << 7)); - pci_read_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, &temp); - pci_write_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, temp | (1 << 7)); + pci_read_config_dword(agp_bridge->dev, INTEL_I7505_AGPCTRL, &temp); + pci_write_config_dword(agp_bridge->dev, INTEL_I7505_AGPCTRL, temp & ~(1 << 7)); + pci_read_config_dword(agp_bridge->dev, INTEL_I7505_AGPCTRL, &temp); + pci_write_config_dword(agp_bridge->dev, INTEL_I7505_AGPCTRL, temp | (1 << 7)); } static void intel_7505_cleanup(void) { aper_size_info_16 *previous_size; - previous_size = A_SIZE_16(agp_bridge.previous_size); - pci_write_config_byte(agp_bridge.dev, INTEL_I7505_APSIZE, + previous_size = A_SIZE_16(agp_bridge->previous_size); + pci_write_config_byte(agp_bridge->dev, INTEL_I7505_APSIZE, previous_size->size_value); } @@ -54,25 +54,25 @@ u32 temp; aper_size_info_16 *current_size; - current_size = A_SIZE_16(agp_bridge.current_size); + current_size = A_SIZE_16(agp_bridge->current_size); /* aperture size */ - pci_write_config_word(agp_bridge.dev, INTEL_I7505_APSIZE, + pci_write_config_word(agp_bridge->dev, INTEL_I7505_APSIZE, current_size->size_value); /* address to map to */ - pci_read_config_dword(agp_bridge.dev, INTEL_I7505_NAPBASELO, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + pci_read_config_dword(agp_bridge->dev, INTEL_I7505_NAPBASELO, &temp); + agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); /* attbase */ - pci_write_config_dword(agp_bridge.dev, INTEL_I7505_ATTBASE, - agp_bridge.gatt_bus_addr); + pci_write_config_dword(agp_bridge->dev, INTEL_I7505_ATTBASE, + agp_bridge->gatt_bus_addr); /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, 0x0000); + pci_write_config_dword(agp_bridge->dev, INTEL_I7505_AGPCTRL, 0x0000); /* clear error registers */ - pci_write_config_byte(agp_bridge.dev, INTEL_I7505_ERRSTS, 0xff); + pci_write_config_byte(agp_bridge->dev, INTEL_I7505_ERRSTS, 0xff); return 0; } @@ -95,30 +95,30 @@ static int __init intel_7505_setup (struct pci_dev *pdev) { - agp_bridge.masks = intel_generic_masks; - agp_bridge.aperture_sizes = (void *) intel_7505_sizes; - agp_bridge.size_type = U16_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_7505_configure; - agp_bridge.fetch_size = intel_7505_fetch_size; - agp_bridge.cleanup = intel_7505_cleanup; - agp_bridge.tlb_flush = intel_7505_tlbflush; - agp_bridge.mask_memory = 
intel_mask_memory; - agp_bridge.agp_enable = i7505_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; + agp_bridge->masks = intel_generic_masks; + agp_bridge->aperture_sizes = (void *) intel_7505_sizes; + agp_bridge->size_type = U16_APER_SIZE; + agp_bridge->num_aperture_sizes = 7; + agp_bridge->dev_private_data = NULL; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->configure = intel_7505_configure; + agp_bridge->fetch_size = intel_7505_fetch_size; + agp_bridge->cleanup = intel_7505_cleanup; + agp_bridge->tlb_flush = intel_7505_tlbflush; + agp_bridge->mask_memory = intel_mask_memory; + agp_bridge->agp_enable = i7505_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = agp_generic_create_gatt_table; + agp_bridge->free_gatt_table = agp_generic_free_gatt_table; + agp_bridge->insert_memory = agp_generic_insert_memory; + agp_bridge->remove_memory = agp_generic_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = agp_generic_resume; + agp_bridge->cant_use_aperture = 0; return 0; } @@ -149,7 +149,7 @@ if (pdev->device == devs[j].device_id) { printk (KERN_INFO PFX "Detected Intel %s chipset\n", devs[j].chipset_name); - agp_bridge.type = devs[j].chipset; + agp_bridge->type = devs[j].chipset; if (devs[j].chipset_setup != NULL) return devs[j].chipset_setup(pdev); @@ -177,10 +177,10 @@ return -ENODEV; if (agp_lookup_host_bridge(dev) != -ENODEV) { - agp_bridge.dev = dev; - agp_bridge.capndx = cap_ptr; + agp_bridge->dev = dev; + agp_bridge->capndx = cap_ptr; /* Fill in the mode register */ - pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx+PCI_AGP_STATUS, &agp_bridge.mode) + pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode) i7x05_agp_driver.dev = dev; agp_register_driver(&i7x05_agp_driver); return 0; @@ -215,7 +215,7 @@ ret_val = pci_module_init(&agp_i7x05_pci_driver); if (ret_val) - agp_bridge.type = NOT_SUPPORTED; + agp_bridge->type = NOT_SUPPORTED; return ret_val; } diff -Nru a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c --- a/drivers/char/agp/intel-agp.c Wed Feb 12 14:47:58 2003 +++ b/drivers/char/agp/intel-agp.c Wed Feb 12 14:47:58 2003 @@ -39,22 +39,22 @@ u32 smram_miscc; struct aper_size_info_fixed *values; - pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc); - values = A_SIZE_FIX(agp_bridge.aperture_sizes); + pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc); + values = A_SIZE_FIX(agp_bridge->aperture_sizes); if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { printk(KERN_WARNING PFX "i810 is disabled\n"); return 0; } if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { - agp_bridge.previous_size = - 
agp_bridge.current_size = (void *) (values + 1); - agp_bridge.aperture_size_idx = 1; + agp_bridge->previous_size = + agp_bridge->current_size = (void *) (values + 1); + agp_bridge->aperture_size_idx = 1; return values[1].size; } else { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values); - agp_bridge.aperture_size_idx = 0; + agp_bridge->previous_size = + agp_bridge->current_size = (void *) (values); + agp_bridge->aperture_size_idx = 0; return values[0].size; } @@ -67,7 +67,7 @@ u32 temp; int i; - current_size = A_SIZE_FIX(agp_bridge.current_size); + current_size = A_SIZE_FIX(agp_bridge->current_size); pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp); temp &= 0xfff80000; @@ -81,16 +81,16 @@ intel_i810_private.num_dcache_entries = 1024; } pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, - agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED); + agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED); CACHE_FLUSH(); - if (agp_bridge.needs_scratch_page == TRUE) { + if (agp_bridge->needs_scratch_page == TRUE) { for (i = 0; i < current_size->num_entries; i++) { OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (i * 4), - agp_bridge.scratch_page); + agp_bridge->scratch_page); } } return 0; @@ -118,14 +118,14 @@ int i, j, num_entries; void *temp; - temp = agp_bridge.current_size; + temp = agp_bridge->current_size; num_entries = A_SIZE_FIX(temp)->num_entries; if ((pg_start + mem->page_count) > num_entries) { return -EINVAL; } for (j = pg_start; j < (pg_start + mem->page_count); j++) { - if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { + if (!PGE_EMPTY(agp_bridge->gatt_table[j])) { return -EBUSY; } } @@ -141,7 +141,7 @@ I810_PTE_VALID); } CACHE_FLUSH(); - agp_bridge.tlb_flush(mem); + agp_bridge->tlb_flush(mem); return 0; } if((type == AGP_PHYS_MEMORY) && (mem->type == AGP_PHYS_MEMORY)) @@ -154,11 +154,11 @@ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (j * 4), - agp_bridge.mask_memory(mem->memory[i], mem->type)); + agp_bridge->mask_memory(mem->memory[i], mem->type)); } CACHE_FLUSH(); - agp_bridge.tlb_flush(mem); + agp_bridge->tlb_flush(mem); return 0; } @@ -170,11 +170,11 @@ for (i = pg_start; i < (mem->page_count + pg_start); i++) { OUTREG32(intel_i810_private.registers, I810_PTE_BASE + (i * 4), - agp_bridge.scratch_page); + agp_bridge->scratch_page); } CACHE_FLUSH(); - agp_bridge.tlb_flush(mem); + agp_bridge->tlb_flush(mem); return 0; } @@ -211,7 +211,7 @@ if (new == NULL) return NULL; - addr = agp_bridge.agp_alloc_page(); + addr = agp_bridge->agp_alloc_page(); if (addr == NULL) { /* Free this structure */ @@ -232,7 +232,7 @@ { agp_free_key(curr->key); if(curr->type == AGP_PHYS_MEMORY) { - agp_bridge.agp_destroy_page(phys_to_virt(curr->memory[0])); + agp_bridge->agp_destroy_page(phys_to_virt(curr->memory[0])); vfree(curr->memory); } kfree(curr); @@ -241,37 +241,37 @@ static unsigned long intel_i810_mask_memory(unsigned long addr, int type) { /* Type checking must be done elsewhere */ - return addr | agp_bridge.masks[type].mask; + return addr | agp_bridge->masks[type].mask; } static int __init intel_i810_setup(struct pci_dev *i810_dev) { intel_i810_private.i810_dev = i810_dev; - agp_bridge.masks = intel_i810_masks; - agp_bridge.aperture_sizes = (void *) intel_i810_sizes; - agp_bridge.size_type 
= FIXED_APER_SIZE; - agp_bridge.num_aperture_sizes = 2; - agp_bridge.dev_private_data = (void *) &intel_i810_private; - agp_bridge.needs_scratch_page = TRUE; - agp_bridge.configure = intel_i810_configure; - agp_bridge.fetch_size = intel_i810_fetch_size; - agp_bridge.cleanup = intel_i810_cleanup; - agp_bridge.tlb_flush = intel_i810_tlbflush; - agp_bridge.mask_memory = intel_i810_mask_memory; - agp_bridge.agp_enable = intel_i810_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = intel_i810_insert_entries; - agp_bridge.remove_memory = intel_i810_remove_entries; - agp_bridge.alloc_by_type = intel_i810_alloc_by_type; - agp_bridge.free_by_type = intel_i810_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; + agp_bridge->masks = intel_i810_masks; + agp_bridge->aperture_sizes = (void *) intel_i810_sizes; + agp_bridge->size_type = FIXED_APER_SIZE; + agp_bridge->num_aperture_sizes = 2; + agp_bridge->dev_private_data = (void *) &intel_i810_private; + agp_bridge->needs_scratch_page = TRUE; + agp_bridge->configure = intel_i810_configure; + agp_bridge->fetch_size = intel_i810_fetch_size; + agp_bridge->cleanup = intel_i810_cleanup; + agp_bridge->tlb_flush = intel_i810_tlbflush; + agp_bridge->mask_memory = intel_i810_mask_memory; + agp_bridge->agp_enable = intel_i810_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = agp_generic_create_gatt_table; + agp_bridge->free_gatt_table = agp_generic_free_gatt_table; + agp_bridge->insert_memory = intel_i810_insert_entries; + agp_bridge->remove_memory = intel_i810_remove_entries; + agp_bridge->alloc_by_type = intel_i810_alloc_by_type; + agp_bridge->free_by_type = intel_i810_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = agp_generic_resume; + agp_bridge->cant_use_aperture = 0; return 0; } @@ -296,7 +296,7 @@ u8 rdct; static const int ddt[4] = { 0, 16, 32, 64 }; - pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl); + pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl); switch (gmch_ctrl & I830_GMCH_GMS_MASK) { case I830_GMCH_GMS_STOLEN_512: @@ -337,10 +337,10 @@ int num_entries; u32 temp; - size = agp_bridge.current_size; + size = agp_bridge->current_size; page_order = size->page_order; num_entries = size->num_entries; - agp_bridge.gatt_table_real = 0; + agp_bridge->gatt_table_real = 0; pci_read_config_dword(intel_i830_private.i830_dev,I810_MMADDR,&temp); temp &= 0xfff80000; @@ -354,9 +354,9 @@ /* we have to call this as early as possible after the MMIO base address is known */ intel_i830_init_gtt_entries(); - agp_bridge.gatt_table = NULL; + agp_bridge->gatt_table = NULL; - agp_bridge.gatt_bus_addr = temp; + agp_bridge->gatt_bus_addr = temp; return(0); } @@ -374,16 +374,16 @@ u16 gmch_ctrl; struct aper_size_info_fixed *values; - pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl); - values = A_SIZE_FIX(agp_bridge.aperture_sizes); + pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl); + values = A_SIZE_FIX(agp_bridge->aperture_sizes); if ((gmch_ctrl & I830_GMCH_MEM_MASK) == 
I830_GMCH_MEM_128M) { - agp_bridge.previous_size = agp_bridge.current_size = (void *) values; - agp_bridge.aperture_size_idx = 0; + agp_bridge->previous_size = agp_bridge->current_size = (void *) values; + agp_bridge->aperture_size_idx = 0; return(values[0].size); } else { - agp_bridge.previous_size = agp_bridge.current_size = (void *) values; - agp_bridge.aperture_size_idx = 1; + agp_bridge->previous_size = agp_bridge->current_size = (void *) values; + agp_bridge->aperture_size_idx = 1; return(values[1].size); } @@ -397,21 +397,21 @@ u16 gmch_ctrl; int i; - current_size = A_SIZE_FIX(agp_bridge.current_size); + current_size = A_SIZE_FIX(agp_bridge->current_size); pci_read_config_dword(intel_i830_private.i830_dev,I810_GMADDR,&temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl); + pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl); gmch_ctrl |= I830_GMCH_ENABLED; - pci_write_config_word(agp_bridge.dev,I830_GMCH_CTRL,gmch_ctrl); + pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl); - OUTREG32(intel_i830_private.registers,I810_PGETBL_CTL,agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED); + OUTREG32(intel_i830_private.registers,I810_PGETBL_CTL,agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED); CACHE_FLUSH(); - if (agp_bridge.needs_scratch_page == TRUE) + if (agp_bridge->needs_scratch_page == TRUE) for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++) - OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (i * 4),agp_bridge.scratch_page); + OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (i * 4),agp_bridge->scratch_page); return (0); } @@ -426,7 +426,7 @@ int i,j,num_entries; void *temp; - temp = agp_bridge.current_size; + temp = agp_bridge->current_size; num_entries = A_SIZE_FIX(temp)->num_entries; if (pg_start < intel_i830_private.gtt_entries) { @@ -452,11 +452,11 @@ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (j * 4), - agp_bridge.mask_memory(mem->memory[i], mem->type)); + agp_bridge->mask_memory(mem->memory[i], mem->type)); CACHE_FLUSH(); - agp_bridge.tlb_flush(mem); + agp_bridge->tlb_flush(mem); return(0); } @@ -473,11 +473,11 @@ } for (i = pg_start; i < (mem->page_count + pg_start); i++) - OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (i * 4),agp_bridge.scratch_page); + OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (i * 4),agp_bridge->scratch_page); CACHE_FLUSH(); - agp_bridge.tlb_flush(mem); + agp_bridge->tlb_flush(mem); return (0); } @@ -504,7 +504,7 @@ if (nw == NULL) return(NULL); - addr = agp_bridge.agp_alloc_page(); + addr = agp_bridge->agp_alloc_page(); if (addr == NULL) { /* free this structure */ agp_free_memory(nw); @@ -526,35 +526,35 @@ { intel_i830_private.i830_dev = i830_dev; - agp_bridge.masks = intel_i810_masks; - agp_bridge.aperture_sizes = (void *) intel_i830_sizes; - agp_bridge.size_type = FIXED_APER_SIZE; - agp_bridge.num_aperture_sizes = 2; - - agp_bridge.dev_private_data = (void *) &intel_i830_private; - agp_bridge.needs_scratch_page = TRUE; - - agp_bridge.configure = intel_i830_configure; - agp_bridge.fetch_size = intel_i830_fetch_size; - agp_bridge.cleanup = intel_i830_cleanup; - agp_bridge.tlb_flush = intel_i810_tlbflush; - agp_bridge.mask_memory = intel_i810_mask_memory; - agp_bridge.agp_enable = intel_i810_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - - 
agp_bridge.create_gatt_table = intel_i830_create_gatt_table; - agp_bridge.free_gatt_table = intel_i830_free_gatt_table; - - agp_bridge.insert_memory = intel_i830_insert_entries; - agp_bridge.remove_memory = intel_i830_remove_entries; - agp_bridge.alloc_by_type = intel_i830_alloc_by_type; - agp_bridge.free_by_type = intel_i810_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; + agp_bridge->masks = intel_i810_masks; + agp_bridge->aperture_sizes = (void *) intel_i830_sizes; + agp_bridge->size_type = FIXED_APER_SIZE; + agp_bridge->num_aperture_sizes = 2; + + agp_bridge->dev_private_data = (void *) &intel_i830_private; + agp_bridge->needs_scratch_page = TRUE; + + agp_bridge->configure = intel_i830_configure; + agp_bridge->fetch_size = intel_i830_fetch_size; + agp_bridge->cleanup = intel_i830_cleanup; + agp_bridge->tlb_flush = intel_i810_tlbflush; + agp_bridge->mask_memory = intel_i810_mask_memory; + agp_bridge->agp_enable = intel_i810_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + + agp_bridge->create_gatt_table = intel_i830_create_gatt_table; + agp_bridge->free_gatt_table = intel_i830_free_gatt_table; + + agp_bridge->insert_memory = intel_i830_insert_entries; + agp_bridge->remove_memory = intel_i830_remove_entries; + agp_bridge->alloc_by_type = intel_i830_alloc_by_type; + agp_bridge->free_by_type = intel_i810_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = agp_generic_resume; + agp_bridge->cant_use_aperture = 0; return(0); } @@ -564,13 +564,13 @@ u16 temp; struct aper_size_info_16 *values; - pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp); - values = A_SIZE_16(agp_bridge.aperture_sizes); + pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp); + values = A_SIZE_16(agp_bridge->aperture_sizes); - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + for (i = 0; i < agp_bridge->num_aperture_sizes; i++) { if (temp == values[i].size_value) { - agp_bridge.previous_size = agp_bridge.current_size = (void *) (values + i); - agp_bridge.aperture_size_idx = i; + agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); + agp_bridge->aperture_size_idx = i; return values[i].size; } } @@ -584,20 +584,20 @@ u8 temp; struct aper_size_info_8 *values; - pci_read_config_byte(agp_bridge.dev, INTEL_APSIZE, &temp); + pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp); /* Intel 815 chipsets have a _weird_ APSIZE register with only * one non-reserved bit, so mask the others out ... 
*/ - if (agp_bridge.type == INTEL_I815) + if (agp_bridge->type == INTEL_I815) temp &= (1 << 3); - values = A_SIZE_8(agp_bridge.aperture_sizes); + values = A_SIZE_8(agp_bridge->aperture_sizes); - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + for (i = 0; i < agp_bridge->num_aperture_sizes; i++) { if (temp == values[i].size_value) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); - agp_bridge.aperture_size_idx = i; + agp_bridge->previous_size = + agp_bridge->current_size = (void *) (values + i); + agp_bridge->aperture_size_idx = i; return values[i].size; } } @@ -607,18 +607,18 @@ static void intel_tlbflush(agp_memory * mem) { - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200); - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280); + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200); + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280); } static void intel_8xx_tlbflush(agp_memory * mem) { u32 temp; - pci_read_config_dword(agp_bridge.dev, INTEL_AGPCTRL, &temp); - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, temp & ~(1 << 7)); - pci_read_config_dword(agp_bridge.dev, INTEL_AGPCTRL, &temp); - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, temp | (1 << 7)); + pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp); + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp & ~(1 << 7)); + pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp); + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp | (1 << 7)); } @@ -627,10 +627,10 @@ u16 temp; struct aper_size_info_16 *previous_size; - previous_size = A_SIZE_16(agp_bridge.previous_size); - pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp); - pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9)); - pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, previous_size->size_value); + previous_size = A_SIZE_16(agp_bridge->previous_size); + pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp); + pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9)); + pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value); } @@ -639,10 +639,10 @@ u16 temp; struct aper_size_info_8 *previous_size; - previous_size = A_SIZE_8(agp_bridge.previous_size); - pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp); - pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9)); - pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, previous_size->size_value); + previous_size = A_SIZE_8(agp_bridge->previous_size); + pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp); + pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9)); + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value); } @@ -652,27 +652,27 @@ u16 temp2; struct aper_size_info_16 *current_size; - current_size = A_SIZE_16(agp_bridge.current_size); + current_size = A_SIZE_16(agp_bridge->current_size); /* aperture size */ - pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, current_size->size_value); + pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ - pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + pci_read_config_dword(agp_bridge->dev, INTEL_APBASE, &temp); + agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); /* attbase - aperture base */ - pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, agp_bridge.gatt_bus_addr); + 
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280); + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280); /* paccfg/nbxcfg */ - pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2); - pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, + pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2); + pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, (temp2 & ~(1 << 10)) | (1 << 9)); /* clear any possible error conditions */ - pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7); + pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7); return 0; } @@ -682,32 +682,32 @@ u8 temp2; struct aper_size_info_8 *current_size; - current_size = A_SIZE_8(agp_bridge.current_size); + current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ - pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ - pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + pci_read_config_dword(agp_bridge->dev, INTEL_APBASE, &temp); + agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); /* attbase - aperture base */ /* the Intel 815 chipset spec. says that bits 29-31 in the * ATTBASE register are reserved -> try not to write them */ - if (agp_bridge.gatt_bus_addr & INTEL_815_ATTBASE_MASK) + if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) panic("gatt bus addr too high"); - pci_read_config_dword(agp_bridge.dev, INTEL_ATTBASE, &addr); + pci_read_config_dword(agp_bridge->dev, INTEL_ATTBASE, &addr); addr &= INTEL_815_ATTBASE_MASK; - addr |= agp_bridge.gatt_bus_addr; - pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, addr); + addr |= agp_bridge->gatt_bus_addr; + pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, addr); /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); /* apcont */ - pci_read_config_byte(agp_bridge.dev, INTEL_815_APCONT, &temp2); - pci_write_config_byte(agp_bridge.dev, INTEL_815_APCONT, temp2 | (1 << 1)); + pci_read_config_byte(agp_bridge->dev, INTEL_815_APCONT, &temp2); + pci_write_config_byte(agp_bridge->dev, INTEL_815_APCONT, temp2 | (1 << 1)); /* clear any possible error conditions */ /* Oddness : this chipset seems to have no ERRSTS register ! 
*/ @@ -724,11 +724,11 @@ u8 temp; struct aper_size_info_8 *previous_size; - previous_size = A_SIZE_8(agp_bridge.previous_size); - pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp); - pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, + previous_size = A_SIZE_8(agp_bridge->previous_size); + pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp); + pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR, temp & ~(1 << 1)); - pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value); } @@ -739,28 +739,28 @@ u8 temp2; struct aper_size_info_8 *current_size; - current_size = A_SIZE_8(agp_bridge.current_size); + current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ - pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, current_size->size_value); + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ - pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + pci_read_config_dword(agp_bridge->dev, INTEL_APBASE, &temp); + agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); /* attbase - aperture base */ - pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, agp_bridge.gatt_bus_addr); + pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); /* global enable aperture access */ /* This flag is not accessed through MCHCFG register as in */ /* i850 chipset. */ - pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp2); - pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, temp2 | (1 << 1)); + pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp2); + pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR, temp2 | (1 << 1)); /* clear any possible AGP-related error conditions */ - pci_write_config_word(agp_bridge.dev, INTEL_I820_ERRSTS, 0x001c); + pci_write_config_word(agp_bridge->dev, INTEL_I820_ERRSTS, 0x001c); return 0; } @@ -770,26 +770,26 @@ u16 temp2; struct aper_size_info_8 *current_size; - current_size = A_SIZE_8(agp_bridge.current_size); + current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ - pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, current_size->size_value); + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ - pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + pci_read_config_dword(agp_bridge->dev, INTEL_APBASE, &temp); + agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); /* attbase - aperture base */ - pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, agp_bridge.gatt_bus_addr); + pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); /* mcgcfg */ - pci_read_config_word(agp_bridge.dev, INTEL_I840_MCHCFG, &temp2); - pci_write_config_word(agp_bridge.dev, INTEL_I840_MCHCFG, temp2 | (1 << 9)); + pci_read_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, &temp2); + pci_write_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, temp2 | (1 << 9)); /* clear any possible error conditions */ - pci_write_config_word(agp_bridge.dev, 
INTEL_I840_ERRSTS, 0xc000); + pci_write_config_word(agp_bridge->dev, INTEL_I840_ERRSTS, 0xc000); return 0; } @@ -799,26 +799,26 @@ u8 temp2; struct aper_size_info_8 *current_size; - current_size = A_SIZE_8(agp_bridge.current_size); + current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ - pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, current_size->size_value); + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ - pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + pci_read_config_dword(agp_bridge->dev, INTEL_APBASE, &temp); + agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); /* attbase - aperture base */ - pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, agp_bridge.gatt_bus_addr); + pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); /* agpm */ - pci_read_config_byte(agp_bridge.dev, INTEL_I845_AGPM, &temp2); - pci_write_config_byte(agp_bridge.dev, INTEL_I845_AGPM, temp2 | (1 << 1)); + pci_read_config_byte(agp_bridge->dev, INTEL_I845_AGPM, &temp2); + pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1)); /* clear any possible error conditions */ - pci_write_config_word(agp_bridge.dev, INTEL_I845_ERRSTS, 0x001c); + pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c); return 0; } @@ -833,26 +833,26 @@ u16 temp2; struct aper_size_info_8 *current_size; - current_size = A_SIZE_8(agp_bridge.current_size); + current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ - pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, current_size->size_value); + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ - pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + pci_read_config_dword(agp_bridge->dev, INTEL_APBASE, &temp); + agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); /* attbase - aperture base */ - pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, agp_bridge.gatt_bus_addr); + pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); /* mcgcfg */ - pci_read_config_word(agp_bridge.dev, INTEL_I850_MCHCFG, &temp2); - pci_write_config_word(agp_bridge.dev, INTEL_I850_MCHCFG, temp2 | (1 << 9)); + pci_read_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, &temp2); + pci_write_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, temp2 | (1 << 9)); /* clear any possible AGP-related error conditions */ - pci_write_config_word(agp_bridge.dev, INTEL_I850_ERRSTS, 0x001c); + pci_write_config_word(agp_bridge->dev, INTEL_I850_ERRSTS, 0x001c); return 0; } @@ -862,26 +862,26 @@ u16 temp2; struct aper_size_info_8 *current_size; - current_size = A_SIZE_8(agp_bridge.current_size); + current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ - pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, current_size->size_value); + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ - pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & 
PCI_BASE_ADDRESS_MEM_MASK); + pci_read_config_dword(agp_bridge->dev, INTEL_APBASE, &temp); + agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); /* attbase - aperture base */ - pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, agp_bridge.gatt_bus_addr); + pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); /* mcgcfg */ - pci_read_config_word(agp_bridge.dev, INTEL_I860_MCHCFG, &temp2); - pci_write_config_word(agp_bridge.dev, INTEL_I860_MCHCFG, temp2 | (1 << 9)); + pci_read_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, &temp2); + pci_write_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, temp2 | (1 << 9)); /* clear any possible AGP-related error conditions */ - pci_write_config_word(agp_bridge.dev, INTEL_I860_ERRSTS, 0xf700); + pci_write_config_word(agp_bridge->dev, INTEL_I860_ERRSTS, 0xf700); return 0; } @@ -891,33 +891,33 @@ u16 temp2; struct aper_size_info_8 *current_size; - current_size = A_SIZE_8(agp_bridge.current_size); + current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ - pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, current_size->size_value); + pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ - pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + pci_read_config_dword(agp_bridge->dev, INTEL_APBASE, &temp); + agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); /* attbase - aperture base */ - pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, agp_bridge.gatt_bus_addr); + pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr); /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000); /* gmch */ - pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2); - pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp2 | (1 << 9)); + pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2); + pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp2 | (1 << 9)); /* clear any possible AGP-related error conditions */ - pci_write_config_word(agp_bridge.dev, INTEL_I830_ERRSTS, 0x1c); + pci_write_config_word(agp_bridge->dev, INTEL_I830_ERRSTS, 0x1c); return 0; } static unsigned long intel_mask_memory(unsigned long addr, int type) { /* Memory type is ignored */ - return addr | agp_bridge.masks[0].mask; + return addr | agp_bridge->masks[0].mask; } static void intel_resume(void) @@ -969,234 +969,234 @@ static int __init intel_generic_setup (struct pci_dev *pdev) { - agp_bridge.masks = intel_generic_masks; - agp_bridge.aperture_sizes = (void *) intel_generic_sizes; - agp_bridge.size_type = U16_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_configure; - agp_bridge.fetch_size = intel_fetch_size; - agp_bridge.cleanup = intel_cleanup; - agp_bridge.tlb_flush = intel_tlbflush; - agp_bridge.mask_memory = intel_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = 
agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = intel_resume; - agp_bridge.cant_use_aperture = 0; + agp_bridge->masks = intel_generic_masks; + agp_bridge->aperture_sizes = (void *) intel_generic_sizes; + agp_bridge->size_type = U16_APER_SIZE; + agp_bridge->num_aperture_sizes = 7; + agp_bridge->dev_private_data = NULL; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->configure = intel_configure; + agp_bridge->fetch_size = intel_fetch_size; + agp_bridge->cleanup = intel_cleanup; + agp_bridge->tlb_flush = intel_tlbflush; + agp_bridge->mask_memory = intel_mask_memory; + agp_bridge->agp_enable = agp_generic_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = agp_generic_create_gatt_table; + agp_bridge->free_gatt_table = agp_generic_free_gatt_table; + agp_bridge->insert_memory = agp_generic_insert_memory; + agp_bridge->remove_memory = agp_generic_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = intel_resume; + agp_bridge->cant_use_aperture = 0; return 0; } static int __init intel_815_setup (struct pci_dev *pdev) { - agp_bridge.masks = intel_generic_masks; - agp_bridge.aperture_sizes = (void *) intel_815_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 2; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_815_configure; - agp_bridge.fetch_size = intel_8xx_fetch_size; - agp_bridge.cleanup = intel_8xx_cleanup; - agp_bridge.tlb_flush = intel_8xx_tlbflush; - agp_bridge.mask_memory = intel_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; + agp_bridge->masks = intel_generic_masks; + agp_bridge->aperture_sizes = (void *) intel_815_sizes; + agp_bridge->size_type = U8_APER_SIZE; + agp_bridge->num_aperture_sizes = 2; + agp_bridge->dev_private_data = NULL; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->configure = intel_815_configure; + agp_bridge->fetch_size = intel_8xx_fetch_size; + agp_bridge->cleanup = intel_8xx_cleanup; + agp_bridge->tlb_flush = intel_8xx_tlbflush; + agp_bridge->mask_memory = intel_mask_memory; + agp_bridge->agp_enable = agp_generic_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = agp_generic_create_gatt_table; + agp_bridge->free_gatt_table = agp_generic_free_gatt_table; + agp_bridge->insert_memory = agp_generic_insert_memory; + 
agp_bridge->remove_memory = agp_generic_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = agp_generic_resume; + agp_bridge->cant_use_aperture = 0; return 0; } static int __init intel_820_setup (struct pci_dev *pdev) { - agp_bridge.masks = intel_generic_masks; - agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_820_configure; - agp_bridge.fetch_size = intel_8xx_fetch_size; - agp_bridge.cleanup = intel_820_cleanup; - agp_bridge.tlb_flush = intel_820_tlbflush; - agp_bridge.mask_memory = intel_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; + agp_bridge->masks = intel_generic_masks; + agp_bridge->aperture_sizes = (void *) intel_8xx_sizes; + agp_bridge->size_type = U8_APER_SIZE; + agp_bridge->num_aperture_sizes = 7; + agp_bridge->dev_private_data = NULL; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->configure = intel_820_configure; + agp_bridge->fetch_size = intel_8xx_fetch_size; + agp_bridge->cleanup = intel_820_cleanup; + agp_bridge->tlb_flush = intel_820_tlbflush; + agp_bridge->mask_memory = intel_mask_memory; + agp_bridge->agp_enable = agp_generic_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = agp_generic_create_gatt_table; + agp_bridge->free_gatt_table = agp_generic_free_gatt_table; + agp_bridge->insert_memory = agp_generic_insert_memory; + agp_bridge->remove_memory = agp_generic_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = agp_generic_resume; + agp_bridge->cant_use_aperture = 0; return 0; } static int __init intel_830mp_setup (struct pci_dev *pdev) { - agp_bridge.masks = intel_generic_masks; - agp_bridge.aperture_sizes = (void *) intel_830mp_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 4; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_830mp_configure; - agp_bridge.fetch_size = intel_8xx_fetch_size; - agp_bridge.cleanup = intel_8xx_cleanup; - agp_bridge.tlb_flush = intel_8xx_tlbflush; - agp_bridge.mask_memory = intel_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - 
agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; + agp_bridge->masks = intel_generic_masks; + agp_bridge->aperture_sizes = (void *) intel_830mp_sizes; + agp_bridge->size_type = U8_APER_SIZE; + agp_bridge->num_aperture_sizes = 4; + agp_bridge->dev_private_data = NULL; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->configure = intel_830mp_configure; + agp_bridge->fetch_size = intel_8xx_fetch_size; + agp_bridge->cleanup = intel_8xx_cleanup; + agp_bridge->tlb_flush = intel_8xx_tlbflush; + agp_bridge->mask_memory = intel_mask_memory; + agp_bridge->agp_enable = agp_generic_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = agp_generic_create_gatt_table; + agp_bridge->free_gatt_table = agp_generic_free_gatt_table; + agp_bridge->insert_memory = agp_generic_insert_memory; + agp_bridge->remove_memory = agp_generic_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = agp_generic_resume; + agp_bridge->cant_use_aperture = 0; return 0; } static int __init intel_840_setup (struct pci_dev *pdev) { - agp_bridge.masks = intel_generic_masks; - agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_840_configure; - agp_bridge.fetch_size = intel_8xx_fetch_size; - agp_bridge.cleanup = intel_8xx_cleanup; - agp_bridge.tlb_flush = intel_8xx_tlbflush; - agp_bridge.mask_memory = intel_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; + agp_bridge->masks = intel_generic_masks; + agp_bridge->aperture_sizes = (void *) intel_8xx_sizes; + agp_bridge->size_type = U8_APER_SIZE; + agp_bridge->num_aperture_sizes = 7; + agp_bridge->dev_private_data = NULL; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->configure = intel_840_configure; + agp_bridge->fetch_size = intel_8xx_fetch_size; + agp_bridge->cleanup = intel_8xx_cleanup; + agp_bridge->tlb_flush = intel_8xx_tlbflush; + agp_bridge->mask_memory = intel_mask_memory; + agp_bridge->agp_enable = agp_generic_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = 
agp_generic_create_gatt_table; + agp_bridge->free_gatt_table = agp_generic_free_gatt_table; + agp_bridge->insert_memory = agp_generic_insert_memory; + agp_bridge->remove_memory = agp_generic_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = agp_generic_resume; + agp_bridge->cant_use_aperture = 0; return 0; } static int __init intel_845_setup (struct pci_dev *pdev) { - agp_bridge.masks = intel_generic_masks; - agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_845_configure; - agp_bridge.fetch_size = intel_8xx_fetch_size; - agp_bridge.cleanup = intel_8xx_cleanup; - agp_bridge.tlb_flush = intel_8xx_tlbflush; - agp_bridge.mask_memory = intel_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = intel_845_resume; - agp_bridge.cant_use_aperture = 0; + agp_bridge->masks = intel_generic_masks; + agp_bridge->aperture_sizes = (void *) intel_8xx_sizes; + agp_bridge->size_type = U8_APER_SIZE; + agp_bridge->num_aperture_sizes = 7; + agp_bridge->dev_private_data = NULL; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->configure = intel_845_configure; + agp_bridge->fetch_size = intel_8xx_fetch_size; + agp_bridge->cleanup = intel_8xx_cleanup; + agp_bridge->tlb_flush = intel_8xx_tlbflush; + agp_bridge->mask_memory = intel_mask_memory; + agp_bridge->agp_enable = agp_generic_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = agp_generic_create_gatt_table; + agp_bridge->free_gatt_table = agp_generic_free_gatt_table; + agp_bridge->insert_memory = agp_generic_insert_memory; + agp_bridge->remove_memory = agp_generic_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = intel_845_resume; + agp_bridge->cant_use_aperture = 0; return 0; } static int __init intel_850_setup (struct pci_dev *pdev) { - agp_bridge.masks = intel_generic_masks; - agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_850_configure; - agp_bridge.fetch_size = intel_8xx_fetch_size; - agp_bridge.cleanup = intel_8xx_cleanup; - agp_bridge.tlb_flush = intel_8xx_tlbflush; - agp_bridge.mask_memory = intel_mask_memory; - agp_bridge.agp_enable = 
agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; + agp_bridge->masks = intel_generic_masks; + agp_bridge->aperture_sizes = (void *) intel_8xx_sizes; + agp_bridge->size_type = U8_APER_SIZE; + agp_bridge->num_aperture_sizes = 7; + agp_bridge->dev_private_data = NULL; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->configure = intel_850_configure; + agp_bridge->fetch_size = intel_8xx_fetch_size; + agp_bridge->cleanup = intel_8xx_cleanup; + agp_bridge->tlb_flush = intel_8xx_tlbflush; + agp_bridge->mask_memory = intel_mask_memory; + agp_bridge->agp_enable = agp_generic_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = agp_generic_create_gatt_table; + agp_bridge->free_gatt_table = agp_generic_free_gatt_table; + agp_bridge->insert_memory = agp_generic_insert_memory; + agp_bridge->remove_memory = agp_generic_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = agp_generic_resume; + agp_bridge->cant_use_aperture = 0; return 0; } static int __init intel_860_setup (struct pci_dev *pdev) { - agp_bridge.masks = intel_generic_masks; - agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_860_configure; - agp_bridge.fetch_size = intel_8xx_fetch_size; - agp_bridge.cleanup = intel_8xx_cleanup; - agp_bridge.tlb_flush = intel_8xx_tlbflush; - agp_bridge.mask_memory = intel_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; + agp_bridge->masks = intel_generic_masks; + agp_bridge->aperture_sizes = (void *) intel_8xx_sizes; + agp_bridge->size_type = U8_APER_SIZE; + agp_bridge->num_aperture_sizes = 7; + agp_bridge->dev_private_data = NULL; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->configure = intel_860_configure; + agp_bridge->fetch_size = intel_8xx_fetch_size; + agp_bridge->cleanup = intel_8xx_cleanup; + agp_bridge->tlb_flush = intel_8xx_tlbflush; + agp_bridge->mask_memory = intel_mask_memory; + 
agp_bridge->agp_enable = agp_generic_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = agp_generic_create_gatt_table; + agp_bridge->free_gatt_table = agp_generic_free_gatt_table; + agp_bridge->insert_memory = agp_generic_insert_memory; + agp_bridge->remove_memory = agp_generic_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = agp_generic_resume; + agp_bridge->cant_use_aperture = 0; return 0; } @@ -1287,7 +1287,7 @@ if (pdev->device == devs[j].device_id) { printk (KERN_INFO PFX "Detected Intel %s chipset\n", devs[j].chipset_name); - agp_bridge.type = devs[j].chipset; + agp_bridge->type = devs[j].chipset; if (devs[j].chipset_setup != NULL) return devs[j].chipset_setup(pdev); @@ -1302,7 +1302,7 @@ if (agp_try_unsupported) { printk(KERN_WARNING PFX "Trying generic Intel routines" " for device id: %04x\n", pdev->device); - agp_bridge.type = INTEL_GENERIC; + agp_bridge->type = INTEL_GENERIC; return intel_generic_setup(pdev); } @@ -1319,7 +1319,7 @@ struct pci_dev *i810_dev; u8 cap_ptr = 0; - agp_bridge.dev = dev; + agp_bridge->dev = dev; /* This shit needs moving into tables/init-routines. */ switch (dev->device) { @@ -1331,7 +1331,7 @@ return -ENODEV; } printk(KERN_INFO PFX "Detected an Intel i810 Chipset.\n"); - agp_bridge.type = INTEL_I810; + agp_bridge->type = INTEL_I810; return intel_i810_setup (i810_dev); case PCI_DEVICE_ID_INTEL_82810_MC3: @@ -1342,7 +1342,7 @@ return -ENODEV; } printk(KERN_INFO PFX "Detected an Intel i810 DC100 Chipset.\n"); - agp_bridge.type = INTEL_I810; + agp_bridge->type = INTEL_I810; return intel_i810_setup(i810_dev); case PCI_DEVICE_ID_INTEL_82810E_MC: @@ -1353,7 +1353,7 @@ return -ENODEV; } printk(KERN_INFO PFX "Detected an Intel i810 E Chipset.\n"); - agp_bridge.type = INTEL_I810; + agp_bridge->type = INTEL_I810; return intel_i810_setup(i810_dev); case PCI_DEVICE_ID_INTEL_82815_MC: @@ -1371,7 +1371,7 @@ break; } printk(KERN_INFO PFX "agpgart: Detected an Intel i815 Chipset.\n"); - agp_bridge.type = INTEL_I810; + agp_bridge->type = INTEL_I810; return intel_i810_setup(i810_dev); case PCI_DEVICE_ID_INTEL_82845G_HB: @@ -1387,11 +1387,11 @@ * We probably have a I845MP chipset with an external graphics * card. 
It will be initialized later */ - agp_bridge.type = INTEL_I845_G; + agp_bridge->type = INTEL_I845_G; break; } printk(KERN_INFO PFX "Detected an Intel 845G Chipset.\n"); - agp_bridge.type = INTEL_I810; + agp_bridge->type = INTEL_I810; return intel_i830_setup(i810_dev); case PCI_DEVICE_ID_INTEL_82830_HB: @@ -1402,11 +1402,11 @@ if (i810_dev == NULL) { /* Intel 830MP with external graphic card */ /* It will be initialized later */ - agp_bridge.type = INTEL_I830_M; + agp_bridge->type = INTEL_I830_M; break; } printk(KERN_INFO PFX "Detected an Intel 830M Chipset.\n"); - agp_bridge.type = INTEL_I810; + agp_bridge->type = INTEL_I810; return intel_i830_setup(i810_dev); default: @@ -1416,10 +1416,10 @@ cap_ptr = pci_find_capability(dev, PCI_CAP_ID_AGP); if (cap_ptr == 0) return -ENODEV; - agp_bridge.capndx = cap_ptr; + agp_bridge->capndx = cap_ptr; /* Fill in the mode register */ - pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx+PCI_AGP_STATUS, &agp_bridge.mode); + pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode); /* probe for known chipsets */ return agp_lookup_host_bridge(dev); @@ -1472,7 +1472,7 @@ ret_val = pci_module_init(&agp_intel_pci_driver); if (ret_val) - agp_bridge.type = NOT_SUPPORTED; + agp_bridge->type = NOT_SUPPORTED; return ret_val; } diff -Nru a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c --- a/drivers/char/agp/sis-agp.c Wed Feb 12 14:47:57 2003 +++ b/drivers/char/agp/sis-agp.c Wed Feb 12 14:47:57 2003 @@ -16,16 +16,16 @@ int i; struct aper_size_info_8 *values; - pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, &temp_size); - values = A_SIZE_8(agp_bridge.aperture_sizes); - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + pci_read_config_byte(agp_bridge->dev, SIS_APSIZE, &temp_size); + values = A_SIZE_8(agp_bridge->aperture_sizes); + for (i = 0; i < agp_bridge->num_aperture_sizes; i++) { if ((temp_size == values[i].size_value) || ((temp_size & ~(0x03)) == (values[i].size_value & ~(0x03)))) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); + agp_bridge->previous_size = + agp_bridge->current_size = (void *) (values + i); - agp_bridge.aperture_size_idx = i; + agp_bridge->aperture_size_idx = i; return values[i].size; } } @@ -35,7 +35,7 @@ static void sis_tlbflush(agp_memory * mem) { - pci_write_config_byte(agp_bridge.dev, SIS_TLBFLUSH, 0x02); + pci_write_config_byte(agp_bridge->dev, SIS_TLBFLUSH, 0x02); } static int sis_configure(void) @@ -43,13 +43,13 @@ u32 temp; struct aper_size_info_8 *current_size; - current_size = A_SIZE_8(agp_bridge.current_size); - pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05); - pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE, - agp_bridge.gatt_bus_addr); - pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, + current_size = A_SIZE_8(agp_bridge->current_size); + pci_write_config_byte(agp_bridge->dev, SIS_TLBCNTRL, 0x05); + pci_read_config_dword(agp_bridge->dev, SIS_APBASE, &temp); + agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + pci_write_config_dword(agp_bridge->dev, SIS_ATTBASE, + agp_bridge->gatt_bus_addr); + pci_write_config_byte(agp_bridge->dev, SIS_APSIZE, current_size->size_value); return 0; } @@ -58,8 +58,8 @@ { struct aper_size_info_8 *previous_size; - previous_size = A_SIZE_8(agp_bridge.previous_size); - pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, + previous_size = 
A_SIZE_8(agp_bridge->previous_size); + pci_write_config_byte(agp_bridge->dev, SIS_APSIZE, (previous_size->size_value & ~(0x03))); } @@ -67,7 +67,7 @@ { /* Memory type is ignored */ - return addr | agp_bridge.masks[0].mask; + return addr | agp_bridge->masks[0].mask; } static struct aper_size_info_8 sis_generic_sizes[7] = @@ -88,30 +88,30 @@ static int __init sis_generic_setup (struct pci_dev *pdev) { - agp_bridge.masks = sis_generic_masks; - agp_bridge.aperture_sizes = (void *) sis_generic_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = sis_configure; - agp_bridge.fetch_size = sis_fetch_size; - agp_bridge.cleanup = sis_cleanup; - agp_bridge.tlb_flush = sis_tlbflush; - agp_bridge.mask_memory = sis_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; + agp_bridge->masks = sis_generic_masks; + agp_bridge->aperture_sizes = (void *) sis_generic_sizes; + agp_bridge->size_type = U8_APER_SIZE; + agp_bridge->num_aperture_sizes = 7; + agp_bridge->dev_private_data = NULL; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->configure = sis_configure; + agp_bridge->fetch_size = sis_fetch_size; + agp_bridge->cleanup = sis_cleanup; + agp_bridge->tlb_flush = sis_tlbflush; + agp_bridge->mask_memory = sis_mask_memory; + agp_bridge->agp_enable = agp_generic_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = agp_generic_create_gatt_table; + agp_bridge->free_gatt_table = agp_generic_free_gatt_table; + agp_bridge->insert_memory = agp_generic_insert_memory; + agp_bridge->remove_memory = agp_generic_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = agp_generic_resume; + agp_bridge->cant_use_aperture = 0; return 0; } @@ -198,7 +198,7 @@ if (pdev->device == devs[j].device_id) { printk (KERN_INFO PFX "Detected SiS %s chipset\n", devs[j].chipset_name); - agp_bridge.type = devs[j].chipset; + agp_bridge->type = devs[j].chipset; if (devs[j].chipset_setup != NULL) return devs[j].chipset_setup(pdev); @@ -212,7 +212,7 @@ if (agp_try_unsupported) { printk(KERN_WARNING PFX "Trying generic SiS routines" " for device id: %04x\n", pdev->device); - agp_bridge.type = SIS_GENERIC; + agp_bridge->type = SIS_GENERIC; return sis_generic_setup(pdev); } @@ -235,10 +235,10 @@ /* probe for known chipsets */ if (agp_lookup_host_bridge(dev) != -ENODEV) { - agp_bridge.dev = dev; - agp_bridge.capndx = cap_ptr; + agp_bridge->dev = dev; + agp_bridge->capndx = cap_ptr; /* Fill in the mode register */ - pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx+PCI_AGP_STATUS, 
&agp_bridge.mode); + pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode); sis_agp_driver.dev = dev; agp_register_driver(&sis_agp_driver); return 0; @@ -272,7 +272,7 @@ ret_val = pci_module_init(&agp_sis_pci_driver); if (ret_val) - agp_bridge.type = NOT_SUPPORTED; + agp_bridge->type = NOT_SUPPORTED; return ret_val; } diff -Nru a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c --- a/drivers/char/agp/sworks-agp.c Wed Feb 12 14:47:57 2003 +++ b/drivers/char/agp/sworks-agp.c Wed Feb 12 14:47:57 2003 @@ -47,7 +47,7 @@ CACHE_FLUSH(); for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { - page_map->remapped[i] = agp_bridge.scratch_page; + page_map->remapped[i] = agp_bridge->scratch_page; } return 0; @@ -120,7 +120,7 @@ #ifndef GET_PAGE_DIR_IDX #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ - GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr)) + GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr)) #endif #ifndef GET_GATT_OFF @@ -135,7 +135,7 @@ u32 temp; int i; - value = A_SIZE_LVL2(agp_bridge.current_size); + value = A_SIZE_LVL2(agp_bridge->current_size); retval = serverworks_create_page_map(&page_dir); if (retval != 0) { return retval; @@ -147,7 +147,7 @@ } /* Create a fake scratch directory */ for(i = 0; i < 1024; i++) { - serverworks_private.scratch_dir.remapped[i] = (unsigned long) agp_bridge.scratch_page; + serverworks_private.scratch_dir.remapped[i] = (unsigned long) agp_bridge->scratch_page; page_dir.remapped[i] = virt_to_phys(serverworks_private.scratch_dir.real); page_dir.remapped[i] |= 0x00000001; @@ -160,17 +160,17 @@ return retval; } - agp_bridge.gatt_table_real = (u32 *)page_dir.real; - agp_bridge.gatt_table = (u32 *)page_dir.remapped; - agp_bridge.gatt_bus_addr = virt_to_phys(page_dir.real); + agp_bridge->gatt_table_real = (u32 *)page_dir.real; + agp_bridge->gatt_table = (u32 *)page_dir.remapped; + agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real); /* Get the address for the gart region. 
* This is a bus address even on the alpha, b/c its * used to program the agp master not the cpu */ - pci_read_config_dword(agp_bridge.dev,serverworks_private.gart_addr_ofs,&temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp); + agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); /* Calculate the agp offset */ @@ -187,8 +187,8 @@ { struct serverworks_page_map page_dir; - page_dir.real = (unsigned long *)agp_bridge.gatt_table_real; - page_dir.remapped = (unsigned long *)agp_bridge.gatt_table; + page_dir.real = (unsigned long *)agp_bridge->gatt_table_real; + page_dir.remapped = (unsigned long *)agp_bridge->gatt_table; serverworks_free_gatt_pages(); serverworks_free_page_map(&page_dir); @@ -203,20 +203,20 @@ u32 temp2; struct aper_size_info_lvl2 *values; - values = A_SIZE_LVL2(agp_bridge.aperture_sizes); - pci_read_config_dword(agp_bridge.dev,serverworks_private.gart_addr_ofs,&temp); - pci_write_config_dword(agp_bridge.dev,serverworks_private.gart_addr_ofs, + values = A_SIZE_LVL2(agp_bridge->aperture_sizes); + pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp); + pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs, SVWRKS_SIZE_MASK); - pci_read_config_dword(agp_bridge.dev,serverworks_private.gart_addr_ofs,&temp2); - pci_write_config_dword(agp_bridge.dev,serverworks_private.gart_addr_ofs,temp); + pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp2); + pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,temp); temp2 &= SVWRKS_SIZE_MASK; - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + for (i = 0; i < agp_bridge->num_aperture_sizes; i++) { if (temp2 == values[i].size_value) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); + agp_bridge->previous_size = + agp_bridge->current_size = (void *) (values + i); - agp_bridge.aperture_size_idx = i; + agp_bridge->aperture_size_idx = i; return values[i].size; } } @@ -231,17 +231,17 @@ u8 enable_reg; u16 cap_reg; - current_size = A_SIZE_LVL2(agp_bridge.current_size); + current_size = A_SIZE_LVL2(agp_bridge->current_size); /* Get the memory mapped registers */ - pci_read_config_dword(agp_bridge.dev, serverworks_private.mm_addr_ofs, &temp); + pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp); temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); serverworks_private.registers = (volatile u8 *) ioremap(temp, 4096); OUTREG8(serverworks_private.registers, SVWRKS_GART_CACHE, 0x0a); OUTREG32(serverworks_private.registers, SVWRKS_GATTBASE, - agp_bridge.gatt_bus_addr); + agp_bridge->gatt_bus_addr); cap_reg = INREG16(serverworks_private.registers, SVWRKS_COMMAND); cap_reg &= ~0x0007; @@ -253,21 +253,21 @@ enable_reg |= 0x1; /* Agp Enable bit */ pci_write_config_byte(serverworks_private.svrwrks_dev, SVWRKS_AGP_ENABLE, enable_reg); - agp_bridge.tlb_flush(NULL); + agp_bridge->tlb_flush(NULL); - agp_bridge.capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP); + agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP); /* Fill in the mode register */ pci_read_config_dword(serverworks_private.svrwrks_dev, - agp_bridge.capndx+PCI_AGP_STATUS, &agp_bridge.mode); + agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode); - pci_read_config_byte(agp_bridge.dev, SVWRKS_CACHING, &enable_reg); + pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg); 
enable_reg &= ~0x3; - pci_write_config_byte(agp_bridge.dev, SVWRKS_CACHING, enable_reg); + pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg); - pci_read_config_byte(agp_bridge.dev, SVWRKS_FEATURE, &enable_reg); + pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg); enable_reg |= (1<<6); - pci_write_config_byte(agp_bridge.dev,SVWRKS_FEATURE, enable_reg); + pci_write_config_byte(agp_bridge->dev,SVWRKS_FEATURE, enable_reg); return 0; } @@ -313,7 +313,7 @@ { /* Only type 0 is supported by the serverworks chipsets */ - return addr | agp_bridge.masks[0].mask; + return addr | agp_bridge->masks[0].mask; } static int serverworks_insert_memory(agp_memory * mem, @@ -323,7 +323,7 @@ unsigned long *cur_gatt; unsigned long addr; - num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries; + num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; if (type != 0 || mem->type != 0) { return -EINVAL; @@ -334,7 +334,7 @@ j = pg_start; while (j < (pg_start + mem->page_count)) { - addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; + addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = SVRWRKS_GET_GATT(addr); if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) { return -EBUSY; @@ -348,12 +348,12 @@ } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; + addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = SVRWRKS_GET_GATT(addr); cur_gatt[GET_GATT_OFF(addr)] = - agp_bridge.mask_memory(mem->memory[i], mem->type); + agp_bridge->mask_memory(mem->memory[i], mem->type); } - agp_bridge.tlb_flush(mem); + agp_bridge->tlb_flush(mem); return 0; } @@ -369,16 +369,16 @@ } CACHE_FLUSH(); - agp_bridge.tlb_flush(mem); + agp_bridge->tlb_flush(mem); for (i = pg_start; i < (mem->page_count + pg_start); i++) { - addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr; + addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; cur_gatt = SVRWRKS_GET_GATT(addr); cur_gatt[GET_GATT_OFF(addr)] = - (unsigned long) agp_bridge.scratch_page; + (unsigned long) agp_bridge->scratch_page; } - agp_bridge.tlb_flush(mem); + agp_bridge->tlb_flush(mem); return 0; } @@ -403,7 +403,7 @@ u32 command; pci_read_config_dword(serverworks_private.svrwrks_dev, - agp_bridge.capndx + PCI_AGP_STATUS, + agp_bridge->capndx + PCI_AGP_STATUS, &command); command = agp_collect_device_status(mode, command); @@ -414,7 +414,7 @@ command |= 0x100; pci_write_config_dword(serverworks_private.svrwrks_dev, - agp_bridge.capndx + PCI_AGP_COMMAND, + agp_bridge->capndx + PCI_AGP_COMMAND, command); agp_device_command(command, 0); @@ -427,39 +427,39 @@ serverworks_private.svrwrks_dev = pdev; - agp_bridge.masks = serverworks_masks; - agp_bridge.aperture_sizes = (void *) serverworks_sizes; - agp_bridge.size_type = LVL2_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = (void *) &serverworks_private; - agp_bridge.needs_scratch_page = TRUE; - agp_bridge.configure = serverworks_configure; - agp_bridge.fetch_size = serverworks_fetch_size; - agp_bridge.cleanup = serverworks_cleanup; - agp_bridge.tlb_flush = serverworks_tlbflush; - agp_bridge.mask_memory = serverworks_mask_memory; - agp_bridge.agp_enable = serverworks_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = serverworks_create_gatt_table; - agp_bridge.free_gatt_table = serverworks_free_gatt_table; - agp_bridge.insert_memory = serverworks_insert_memory; - agp_bridge.remove_memory = serverworks_remove_memory; - agp_bridge.alloc_by_type = 
agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; + agp_bridge->masks = serverworks_masks; + agp_bridge->aperture_sizes = (void *) serverworks_sizes; + agp_bridge->size_type = LVL2_APER_SIZE; + agp_bridge->num_aperture_sizes = 7; + agp_bridge->dev_private_data = (void *) &serverworks_private; + agp_bridge->needs_scratch_page = TRUE; + agp_bridge->configure = serverworks_configure; + agp_bridge->fetch_size = serverworks_fetch_size; + agp_bridge->cleanup = serverworks_cleanup; + agp_bridge->tlb_flush = serverworks_tlbflush; + agp_bridge->mask_memory = serverworks_mask_memory; + agp_bridge->agp_enable = serverworks_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = serverworks_create_gatt_table; + agp_bridge->free_gatt_table = serverworks_free_gatt_table; + agp_bridge->insert_memory = serverworks_insert_memory; + agp_bridge->remove_memory = serverworks_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = agp_generic_resume; + agp_bridge->cant_use_aperture = 0; - pci_read_config_dword(agp_bridge.dev, + pci_read_config_dword(agp_bridge->dev, SVWRKS_APSIZE, &temp); serverworks_private.gart_addr_ofs = 0x10; if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { - pci_read_config_dword(agp_bridge.dev, + pci_read_config_dword(agp_bridge->dev, SVWRKS_APSIZE + 4, &temp2); if(temp2 != 0) { @@ -472,11 +472,11 @@ serverworks_private.mm_addr_ofs = 0x14; } - pci_read_config_dword(agp_bridge.dev, + pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp); if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { - pci_read_config_dword(agp_bridge.dev, + pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs + 4, &temp2); if(temp2 != 0) { @@ -503,21 +503,21 @@ return -ENODEV; } - agp_bridge.dev = dev; + agp_bridge->dev = dev; switch (dev->device) { case PCI_DEVICE_ID_SERVERWORKS_HE: - agp_bridge.type = SVWRKS_HE; + agp_bridge->type = SVWRKS_HE; return serverworks_setup(bridge_dev); case PCI_DEVICE_ID_SERVERWORKS_LE: case 0x0007: - agp_bridge.type = SVWRKS_LE; + agp_bridge->type = SVWRKS_LE; return serverworks_setup(bridge_dev); default: if(agp_try_unsupported) { - agp_bridge.type = SVWRKS_GENERIC; + agp_bridge->type = SVWRKS_GENERIC; return serverworks_setup(bridge_dev); } break; @@ -565,7 +565,7 @@ ret_val = pci_module_init(&agp_serverworks_pci_driver); if (ret_val) - agp_bridge.type = NOT_SUPPORTED; + agp_bridge->type = NOT_SUPPORTED; return ret_val; } diff -Nru a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c --- a/drivers/char/agp/via-agp.c Wed Feb 12 14:47:58 2003 +++ b/drivers/char/agp/via-agp.c Wed Feb 12 14:47:58 2003 @@ -11,73 +11,78 @@ static int agp_try_unsupported __initdata = 0; + static int via_fetch_size(void) { int i; u8 temp; struct aper_size_info_8 *values; - values = A_SIZE_8(agp_bridge.aperture_sizes); - pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp); - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + values = A_SIZE_8(agp_bridge->aperture_sizes); + pci_read_config_byte(agp_bridge->dev, VIA_APSIZE, 
&temp); + for (i = 0; i < agp_bridge->num_aperture_sizes; i++) { if (temp == values[i].size_value) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); - agp_bridge.aperture_size_idx = i; + agp_bridge->previous_size = + agp_bridge->current_size = (void *) (values + i); + agp_bridge->aperture_size_idx = i; return values[i].size; } } - return 0; } + static int via_configure(void) { u32 temp; struct aper_size_info_8 *current_size; - current_size = A_SIZE_8(agp_bridge.current_size); + current_size = A_SIZE_8(agp_bridge->current_size); /* aperture size */ - pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, + pci_write_config_byte(agp_bridge->dev, VIA_APSIZE, current_size->size_value); /* address to map too */ - pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + pci_read_config_dword(agp_bridge->dev, VIA_APBASE, &temp); + agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); /* GART control register */ - pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f); + pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, 0x0000000f); /* attbase - aperture GATT base */ - pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, - (agp_bridge.gatt_bus_addr & 0xfffff000) | 3); + pci_write_config_dword(agp_bridge->dev, VIA_ATTBASE, + (agp_bridge->gatt_bus_addr & 0xfffff000) | 3); return 0; } + static void via_cleanup(void) { struct aper_size_info_8 *previous_size; - previous_size = A_SIZE_8(agp_bridge.previous_size); - pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, + previous_size = A_SIZE_8(agp_bridge->previous_size); + pci_write_config_byte(agp_bridge->dev, VIA_APSIZE, previous_size->size_value); /* Do not disable by writing 0 to VIA_ATTBASE, it screws things up * during reinitialization. 
*/ } + static void via_tlbflush(agp_memory * mem) { - pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000008f); - pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f); + pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, 0x0000008f); + pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, 0x0000000f); } + static unsigned long via_mask_memory(unsigned long addr, int type) { /* Memory type is ignored */ - return addr | agp_bridge.masks[0].mask; + return addr | agp_bridge->masks[0].mask; } + static struct aper_size_info_8 via_generic_sizes[7] = { {256, 65536, 6, 0}, @@ -89,123 +94,327 @@ {4, 1024, 0, 252} }; + static struct gatt_mask via_generic_masks[] = { {.mask = 0x00000000, .type = 0} }; + +#ifdef CONFIG_AGP3 +static int via_fetch_size_agp3(void) +{ + int i; + u16 temp; + struct aper_size_info_16 *values; + + values = A_SIZE_16(agp_bridge->aperture_sizes); + pci_read_config_word(agp_bridge->dev, VIA_AGP3_APSIZE, &temp); + temp &= 0xfff; + + for (i = 0; i < agp_bridge->num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge->previous_size = + agp_bridge->current_size = (void *) (values + i); + agp_bridge->aperture_size_idx = i; + return values[i].size; + } + } + return 0; +} + + +static int via_configure_agp3(void) +{ + u32 temp; + struct aper_size_info_16 *current_size; + + current_size = A_SIZE_16(agp_bridge->current_size); + + /* address to map too */ + pci_read_config_dword(agp_bridge->dev, VIA_APBASE, &temp); + agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* attbase - aperture GATT base */ + pci_write_config_dword(agp_bridge->dev, VIA_AGP3_ATTBASE, + agp_bridge->gatt_bus_addr & 0xfffff000); + return 0; +} + + +static void via_cleanup_agp3(void) +{ + struct aper_size_info_16 *previous_size; + + previous_size = A_SIZE_16(agp_bridge->previous_size); + pci_write_config_byte(agp_bridge->dev, VIA_APSIZE, previous_size->size_value); +} + + +static void via_tlbflush_agp3(agp_memory * mem) +{ + u32 temp; + + pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp); + pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp & ~(1<<7)); + pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp); +} + + +static struct aper_size_info_16 via_generic_agp3_sizes[11] = +{ + { 4, 1024, 0, 1<<11|1<<10|1<<9|1<<8|1<<5|1<<4|1<<3|1<<2|1<<1|1<<0 }, + { 8, 2048, 1, 1<<11|1<<10|1<<9|1<<8|1<<5|1<<4|1<<3|1<<2|1<<1}, + { 16, 4096, 2, 1<<11|1<<10|1<<9|1<<8|1<<5|1<<4|1<<3|1<<2}, + { 32, 8192, 3, 1<<11|1<<10|1<<9|1<<8|1<<5|1<<4|1<<3}, + { 64, 16384, 4, 1<<11|1<<10|1<<9|1<<8|1<<5|1<<4}, + { 128, 32768, 5, 1<<11|1<<10|1<<9|1<<8|1<<5}, + { 256, 65536, 6, 1<<11|1<<10|1<<9|1<<8}, + { 512, 131072, 7, 1<<11|1<<10|1<<9}, + { 1024, 262144, 8, 1<<11|1<<10}, + { 2048, 524288, 9, 1<<11} /* 2GB <- Max supported */ +}; + + +static int __init via_generic_agp3_setup (struct pci_dev *pdev) +{ + agp_bridge->dev = pdev; + agp_bridge->type = VIA_GENERIC; + agp_bridge->masks = via_generic_masks; + agp_bridge->aperture_sizes = (void *) via_generic_agp3_sizes; + agp_bridge->size_type = U16_APER_SIZE; + agp_bridge->num_aperture_sizes = 10; + agp_bridge->dev_private_data = NULL; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->agp_enable = agp_generic_agp_3_0_enable; + agp_bridge->configure = via_configure_agp3; + agp_bridge->fetch_size = via_fetch_size_agp3; + agp_bridge->cleanup = via_cleanup_agp3; + agp_bridge->tlb_flush = via_tlbflush_agp3; + agp_bridge->mask_memory = via_mask_memory; + agp_bridge->cache_flush = global_cache_flush; + 
agp_bridge->create_gatt_table = agp_generic_create_gatt_table; + agp_bridge->free_gatt_table = agp_generic_free_gatt_table; + agp_bridge->insert_memory = agp_generic_insert_memory; + agp_bridge->remove_memory = agp_generic_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = agp_generic_resume; + agp_bridge->cant_use_aperture = 0; + return 0; +} +#else +static int __init via_generic_agp3_setup (struct pci_dev *pdev) +{ + printk (KERN_INFO PFX "Bridge in AGP3 mode, but CONFIG_AGP3=n\n"); + return -ENODEV; +} +#endif /* CONFIG_AGP3 */ + + static int __init via_generic_setup (struct pci_dev *pdev) { - agp_bridge.masks = via_generic_masks; - agp_bridge.aperture_sizes = (void *) via_generic_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = via_configure; - agp_bridge.fetch_size = via_fetch_size; - agp_bridge.cleanup = via_cleanup; - agp_bridge.tlb_flush = via_tlbflush; - agp_bridge.mask_memory = via_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; +#ifdef CONFIG_AGP3 + /* Garg, there are KT400s with KT266 IDs. */ + if (pdev->device == PCI_DEVICE_ID_VIA_8367_0) { + + /* Is there a KT400 subsystem ? */ + if (pdev->subsystem_device==PCI_DEVICE_ID_VIA_8377_0) { + u8 reg; + + printk (KERN_INFO PFX "Found KT400 in disguise as a KT266.\n"); + + /* Check AGP compatability mode. */ + pci_read_config_byte(pdev, VIA_AGPSEL, &reg); + if ((reg & (1<<1))==0) + return via_generic_agp3_setup(pdev); + + /* Its in 2.0 mode, drop through. 
*/ + } + } +#endif + + agp_bridge->masks = via_generic_masks; + agp_bridge->aperture_sizes = (void *) via_generic_sizes; + agp_bridge->size_type = U8_APER_SIZE; + agp_bridge->num_aperture_sizes = 7; + agp_bridge->dev_private_data = NULL; + agp_bridge->needs_scratch_page = FALSE; + agp_bridge->configure = via_configure; + agp_bridge->fetch_size = via_fetch_size; + agp_bridge->cleanup = via_cleanup; + agp_bridge->tlb_flush = via_tlbflush; + agp_bridge->mask_memory = via_mask_memory; + agp_bridge->agp_enable = agp_generic_agp_enable; + agp_bridge->cache_flush = global_cache_flush; + agp_bridge->create_gatt_table = agp_generic_create_gatt_table; + agp_bridge->free_gatt_table = agp_generic_free_gatt_table; + agp_bridge->insert_memory = agp_generic_insert_memory; + agp_bridge->remove_memory = agp_generic_remove_memory; + agp_bridge->alloc_by_type = agp_generic_alloc_by_type; + agp_bridge->free_by_type = agp_generic_free_by_type; + agp_bridge->agp_alloc_page = agp_generic_alloc_page; + agp_bridge->agp_destroy_page = agp_generic_destroy_page; + agp_bridge->suspend = agp_generic_suspend; + agp_bridge->resume = agp_generic_resume; + agp_bridge->cant_use_aperture = 0; return 0; } -/* - * The KT400 does magick to put the AGP bridge compliant with the same - * standards version as the graphics card. If we haven't fallen into - * 2.0 compatability mode, we abort, as this gets picked up by - * via-agp3.o - */ +/* The KT400 does magick to put the AGP bridge compliant with the same + * standards version as the graphics card. */ static int __init via_kt400_setup(struct pci_dev *pdev) { u8 reg; pci_read_config_byte(pdev, VIA_AGPSEL, &reg); /* Check AGP 2.0 compatability mode. */ - if ((reg & (1<<1))==1) { - via_generic_setup(pdev); - return 0; - } - return -ENODEV; + if ((reg & (1<<1))==0) + return via_generic_agp3_setup(pdev); + return via_generic_setup(pdev); } + static struct agp_device_ids via_agp_device_ids[] __initdata = { { .device_id = PCI_DEVICE_ID_VIA_82C597_0, - .chipset = VIA_VP3, .chipset_name = "VP3", }, + { .device_id = PCI_DEVICE_ID_VIA_82C598_0, - .chipset = VIA_MVP3, .chipset_name = "MVP3", }, + { .device_id = PCI_DEVICE_ID_VIA_8501_0, - .chipset = VIA_MVP4, .chipset_name = "MVP4", }, + + /* VT8601 */ + { + .device_id = PCI_DEVICE_ID_VIA_8601_0, + .chipset_name = "PLE133 ProMedia", + }, + + /* VT82C693A / VT28C694T */ { .device_id = PCI_DEVICE_ID_VIA_82C691, - .chipset = VIA_APOLLO_PRO, - .chipset_name = "Apollo Pro", + .chipset_name = "Apollo Pro 133", }, + { .device_id = PCI_DEVICE_ID_VIA_8371_0, - .chipset = VIA_APOLLO_KX133, .chipset_name = "Apollo Pro KX133", }, + + /* VT8633 */ { .device_id = PCI_DEVICE_ID_VIA_8633_0, - .chipset = VIA_APOLLO_PRO_266, .chipset_name = "Apollo Pro 266", }, + + /* VT8361 */ +/* { + .device_id = PCI_DEVICE_ID_VIA_8361, // 0x3112 + .chipset_name = "Apollo KLE133", + }, */ + + /* VT8365 / VT8362 */ { .device_id = PCI_DEVICE_ID_VIA_8363_0, - .chipset = VIA_APOLLO_KT133, - .chipset_name = "Apollo Pro KT133", + .chipset_name = "Apollo Pro KT133/KM133/TwisterK", }, + + /* VT8753A */ +/* { + .device_id = PCI_DEVICE_ID_VIA_8753_0, // 0x3128 + .chipset_name = "P4X266", + }, */ + + /* VT8366 */ { .device_id = PCI_DEVICE_ID_VIA_8367_0, - .chipset = VIA_APOLLO_KT133, - .chipset_name = "Apollo Pro KT266", + .chipset_name = "Apollo Pro KT266/KT333", }, + + /* VT8633 (for CuMine/ Celeron) */ { .device_id = PCI_DEVICE_ID_VIA_8653_0, - .chipset = VIA_APOLLO_PRO, .chipset_name = "Apollo Pro 266T", }, + + /* KM266 / PM266 */ +/* { + .device_id = PCI_DEVICE_ID_VIA_KM266, // 
0x3116 + .chipset_name = "KM266/PM266", + }, */ + + /* CLE266 */ +/* { + .device_id = PCI_DEVICE_ID_VIA_CLE266, // 0x3123 + .chipset_name = "CLE266", + }, */ + { .device_id = PCI_DEVICE_ID_VIA_8377_0, - .chipset = VIA_APOLLO_KT400, .chipset_name = "Apollo Pro KT400", .chipset_setup = via_kt400_setup, }, + + /* VT8604 / VT8605 / VT8603 / TwisterT + * (Apollo Pro133A chipset with S3 Savage4) */ { - /* VIA ProSavage PM133 (Apollo Pro133A chipset with S3 Savage4) */ .device_id = PCI_DEVICE_ID_VIA_82C694X_0, - .chipset = VIA_VT8605, - .chipset_name = "Apollo ProSavage PM133" + .chipset_name = "Apollo ProSavage PM133/PL133/PN133/Twister" }, + + /* VT8752*/ +/* { + .device_id = PCI_DEVICE_ID_VIA_8752, // 0x3148 + .chipset_name = "ProSavage DDR P4M266", + }, */ + + /* KN266/PN266 */ +/* { + .device_id = PCI_DEVICE_ID_KN266, // 0x3156 + .chipset_name = "KN266/PN266", + }, */ + + /* VT8754 */ { .device_id = PCI_DEVICE_ID_VIA_8754, - .chipset = VIA_P4X, .chipset_name = "Apollo P4X333/P4X400" }, + + /* P4N333 */ +/* { + .device_id = PCI_DEVICE_ID_VIA_P4N333, // 0x3178 + .chipset_name = "P4N333", + }, */ + + /* P4X600 */ +/* { + .device_id = PCI_DEVICE_ID_VIA_P4X600, // 0x0198 + .chipset_name = "P4X600", + }, */ + + /* KM400 */ +/* { + .device_id = PCI_DEVICE_ID_VIA_KM400, // 0x3205 + .chipset_name = "KM400", + }, */ + + /* P4M400 */ +/* { + .device_id = PCI_DEVICE_ID_VIA_P4M400, // 0x3209 + .chipset_name = "PM400", + }, */ + { }, /* dummy final entry, always present */ }; @@ -221,7 +430,7 @@ while (devs[j].chipset_name != NULL) { if (pdev->device == devs[j].device_id) { printk (KERN_INFO PFX "Detected VIA %s chipset\n", devs[j].chipset_name); - agp_bridge.type = devs[j].chipset; + agp_bridge->type = VIA_GENERIC; if (devs[j].chipset_setup != NULL) return devs[j].chipset_setup(pdev); @@ -235,7 +444,7 @@ if (agp_try_unsupported) { printk(KERN_WARNING PFX "Trying generic VIA routines" " for device id: %04x\n", pdev->device); - agp_bridge.type = VIA_GENERIC; + agp_bridge->type = VIA_GENERIC; return via_generic_setup(pdev); } @@ -244,10 +453,12 @@ return -ENODEV; } + static struct agp_driver via_agp_driver = { .owner = THIS_MODULE, }; + static int __init agp_via_probe (struct pci_dev *dev, const struct pci_device_id *ent) { u8 cap_ptr = 0; @@ -258,10 +469,10 @@ /* probe for known chipsets */ if (agp_lookup_host_bridge (dev) != -ENODEV) { - agp_bridge.dev = dev; - agp_bridge.capndx = cap_ptr; + agp_bridge->dev = dev; + agp_bridge->capndx = cap_ptr; /* Fill in the mode register */ - pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx+PCI_AGP_STATUS, &agp_bridge.mode); + pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode); via_agp_driver.dev = dev; agp_register_driver(&via_agp_driver); return 0; @@ -269,6 +480,7 @@ return -ENODEV; } + static struct pci_device_id agp_via_pci_table[] __initdata = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), @@ -283,31 +495,36 @@ MODULE_DEVICE_TABLE(pci, agp_via_pci_table); + static struct __initdata pci_driver agp_via_pci_driver = { .name = "agpgart-via", .id_table = agp_via_pci_table, .probe = agp_via_probe, }; + static int __init agp_via_init(void) { int ret_val; ret_val = pci_module_init(&agp_via_pci_driver); if (ret_val) - agp_bridge.type = NOT_SUPPORTED; + agp_bridge->type = NOT_SUPPORTED; return ret_val; } + static void __exit agp_via_cleanup(void) { agp_unregister_driver(&via_agp_driver); pci_unregister_driver(&agp_via_pci_driver); } + module_init(agp_via_init); module_exit(agp_via_cleanup); MODULE_PARM(agp_try_unsupported, 
"1i"); MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Dave Jones "); diff -Nru a/drivers/char/agp/via-kt400.c b/drivers/char/agp/via-kt400.c --- a/drivers/char/agp/via-kt400.c Wed Feb 12 14:47:58 2003 +++ /dev/null Wed Dec 31 16:00:00 1969 @@ -1,202 +0,0 @@ -/* - * VIA KT400 AGPGART routines. - * - * The KT400 does magick to put the AGP bridge compliant with the same - * standards version as the graphics card. If we haven't fallen into - * 2.0 compatability mode, we run this code. Otherwise, we run the - * code in via-agp.c - */ - -#include -#include -#include -#include -#include -#include "agp.h" - -static int via_fetch_size(void) -{ - int i; - u8 temp; - struct aper_size_info_16 *values; - - values = A_SIZE_16(agp_bridge.aperture_sizes); - pci_read_config_byte(agp_bridge.dev, VIA_AGP3_APSIZE, &temp); - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { - if (temp == values[i].size_value) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); - agp_bridge.aperture_size_idx = i; - return values[i].size; - } - } - return 0; -} - -static int via_configure(void) -{ - u32 temp; - struct aper_size_info_16 *current_size; - - current_size = A_SIZE_16(agp_bridge.current_size); - - /* address to map too */ - pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - - /* attbase - aperture GATT base */ - pci_write_config_dword(agp_bridge.dev, VIA_AGP3_ATTBASE, - agp_bridge.gatt_bus_addr & 0xfffff000); - return 0; -} - -static void via_cleanup(void) -{ - struct aper_size_info_16 *previous_size; - - previous_size = A_SIZE_16(agp_bridge.previous_size); - pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, previous_size->size_value); -} - -static void via_tlbflush(agp_memory * mem) -{ - u32 temp; - - pci_read_config_dword(agp_bridge.dev, VIA_AGP3_GARTCTRL, &temp); - pci_write_config_dword(agp_bridge.dev, VIA_AGP3_GARTCTRL, temp & ~(1<<7)); - pci_write_config_dword(agp_bridge.dev, VIA_AGP3_GARTCTRL, temp); -} - -static unsigned long via_mask_memory(unsigned long addr, int type) -{ - /* Memory type is ignored */ - - return addr | agp_bridge.masks[0].mask; -} - -static struct aper_size_info_16 via_generic_sizes[11] = -{ - { 4, 1024, 0, 1<<11|1<<10|1<<9|1<<8|1<<5|1<<4|1<<3|1<<2|1<<1|1<<0 }, - { 8, 2048, 1, 1<<11|1<<10|1<<9|1<<8|1<<5|1<<4|1<<3|1<<2|1<<1}, - { 16, 4096, 2, 1<<11|1<<10|1<<9|1<<8|1<<5|1<<4|1<<3|1<<2}, - { 32, 8192, 3, 1<<11|1<<10|1<<9|1<<8|1<<5|1<<4|1<<3}, - { 64, 16384, 4, 1<<11|1<<10|1<<9|1<<8|1<<5|1<<4}, - { 128, 32768, 5, 1<<11|1<<10|1<<9|1<<8|1<<5}, - { 256, 65536, 6, 1<<11|1<<10|1<<9|1<<8}, - { 512, 131072, 7, 1<<11|1<<10|1<<9}, - { 1024, 262144, 8, 1<<11|1<<10}, - { 2048, 524288, 9, 1<<11} /* 2GB <- Max supported */ -}; - -static struct gatt_mask via_generic_masks[] = -{ - {.mask = 0x00000000, .type = 0} -}; - - -static void __init via_kt400_enable(u32 mode) -{ - if ((agp_generic_agp_3_0_enable(mode))==FALSE) - printk (KERN_INFO PFX "agp_generic_agp_3_0_enable() failed\n"); -} - -static struct agp_driver via_kt400_agp_driver = { - .owner = THIS_MODULE, -}; - -static int __init agp_via_probe (struct pci_dev *dev, const struct pci_device_id *ent) -{ - u8 reg; - u8 cap_ptr = 0; - - cap_ptr = pci_find_capability(dev, PCI_CAP_ID_AGP); - if (cap_ptr == 0) - return -ENODEV; - - pci_read_config_byte(dev, VIA_AGPSEL, ®); - /* Check if we are in AGP 2.0 compatability mode, if so it - * will be picked up by via-agp.o */ - if ((reg & (1<<1))==1) - return -ENODEV; - - printk (KERN_INFO PFX 
"Detected VIA KT400 AGP3 chipset\n"); - - agp_bridge.dev = dev; - agp_bridge.type = VIA_APOLLO_KT400_3; - agp_bridge.capndx = cap_ptr; - agp_bridge.masks = via_generic_masks; - agp_bridge.aperture_sizes = (void *) via_generic_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.agp_enable = via_kt400_enable; - agp_bridge.configure = via_configure; - agp_bridge.fetch_size = via_fetch_size; - agp_bridge.cleanup = via_cleanup; - agp_bridge.tlb_flush = via_tlbflush; - agp_bridge.mask_memory = via_mask_memory; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; - - /* Fill in the mode register */ - pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx+PCI_AGP_STATUS, &agp_bridge.mode); - - via_kt400_agp_driver.dev = dev; - agp_register_driver(&via_kt400_agp_driver); - return 0; -} - -static struct pci_device_id agp_via_pci_table[] __initdata = { - { - .class = (PCI_CLASS_BRIDGE_HOST << 8), - .class_mask = ~0, - .vendor = PCI_VENDOR_ID_VIA, - .device = PCI_DEVICE_ID_VIA_8377_0, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - }, - { } -}; - -MODULE_DEVICE_TABLE(pci, agp_via_pci_table); - -static struct __initdata pci_driver agp_via_pci_driver = { - .name = "agpgart-via", - .id_table = agp_via_pci_table, - .probe = agp_via_probe, -}; - -static int __init agp_via_init(void) -{ - int ret_val; - - ret_val = pci_module_init(&agp_via_pci_driver); - if (ret_val) - agp_bridge.type = NOT_SUPPORTED; - - return ret_val; -} - -static void __exit agp_via_cleanup(void) -{ - agp_unregister_driver(&via_kt400_agp_driver); - pci_unregister_driver(&agp_via_pci_driver); -} - -module_init(agp_via_init); -module_exit(agp_via_cleanup); - -MODULE_AUTHOR("Dave Jones "); -MODULE_LICENSE("GPL and additional rights"); - diff -Nru a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h --- a/drivers/char/drm/drmP.h Wed Feb 12 14:47:58 2003 +++ b/drivers/char/drm/drmP.h Wed Feb 12 14:47:58 2003 @@ -230,16 +230,16 @@ if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; } /* Mapping helper macros */ -#define DRM_IOREMAP(map) \ - (map)->handle = DRM(ioremap)( (map)->offset, (map)->size ) +#define DRM_IOREMAP(map, dev) \ + (map)->handle = DRM(ioremap)( (map)->offset, (map)->size, (dev) ) -#define DRM_IOREMAP_NOCACHE(map) \ - (map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size) +#define DRM_IOREMAP_NOCACHE(map, dev) \ + (map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size, (dev)) -#define DRM_IOREMAPFREE(map) \ - do { \ - if ( (map)->handle && (map)->size ) \ - DRM(ioremapfree)( (map)->handle, (map)->size ); \ +#define DRM_IOREMAPFREE(map, dev) \ + do { \ + if ( (map)->handle && (map)->size ) \ + DRM(ioremapfree)( (map)->handle, (map)->size, (dev) ); \ } while (0) #define DRM_FIND_MAP(_map, _o) \ @@ -693,9 +693,10 @@ extern unsigned long DRM(alloc_pages)(int order, int area); extern void 
DRM(free_pages)(unsigned long address, int order, int area); -extern void *DRM(ioremap)(unsigned long offset, unsigned long size); -extern void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size); -extern void DRM(ioremapfree)(void *pt, unsigned long size); +extern void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev); +extern void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, + drm_device_t *dev); +extern void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev); #if __REALLY_HAVE_AGP extern agp_memory *DRM(alloc_agp)(int pages, u32 type); diff -Nru a/drivers/char/drm/drm_bufs.h b/drivers/char/drm/drm_bufs.h --- a/drivers/char/drm/drm_bufs.h Wed Feb 12 14:47:57 2003 +++ b/drivers/char/drm/drm_bufs.h Wed Feb 12 14:47:57 2003 @@ -107,7 +107,7 @@ switch ( map->type ) { case _DRM_REGISTERS: case _DRM_FRAME_BUFFER: -#if !defined(__sparc__) && !defined(__alpha__) +#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) if ( map->offset + map->size < map->offset || map->offset < virt_to_phys(high_memory) ) { DRM(free)( map, sizeof(*map), DRM_MEM_MAPS ); @@ -124,7 +124,7 @@ MTRR_TYPE_WRCOMB, 1 ); } #endif - map->handle = DRM(ioremap)( map->offset, map->size ); + map->handle = DRM(ioremap)( map->offset, map->size, dev ); break; case _DRM_SHM: @@ -246,7 +246,7 @@ DRM_DEBUG("mtrr_del = %d\n", retcode); } #endif - DRM(ioremapfree)(map->handle, map->size); + DRM(ioremapfree)(map->handle, map->size, dev); break; case _DRM_SHM: vfree(map->handle); diff -Nru a/drivers/char/drm/drm_drv.h b/drivers/char/drm/drm_drv.h --- a/drivers/char/drm/drm_drv.h Wed Feb 12 14:47:58 2003 +++ b/drivers/char/drm/drm_drv.h Wed Feb 12 14:47:58 2003 @@ -443,7 +443,7 @@ DRM_DEBUG( "mtrr_del=%d\n", retcode ); } #endif - DRM(ioremapfree)( map->handle, map->size ); + DRM(ioremapfree)( map->handle, map->size, dev ); break; case _DRM_SHM: vfree(map->handle); diff -Nru a/drivers/char/drm/drm_memory.h b/drivers/char/drm/drm_memory.h --- a/drivers/char/drm/drm_memory.h Wed Feb 12 14:47:57 2003 +++ b/drivers/char/drm/drm_memory.h Wed Feb 12 14:47:57 2003 @@ -33,6 +33,10 @@ #include #include "drmP.h" #include +#include + +#include +#include typedef struct drm_mem_stats { const char *name; @@ -291,17 +295,122 @@ } } -void *DRM(ioremap)(unsigned long offset, unsigned long size) +#if __REALLY_HAVE_AGP + +/* + * Find the drm_map that covers the range [offset, offset+size). + */ +static inline drm_map_t * +drm_lookup_map (unsigned long offset, unsigned long size, drm_device_t *dev) { + struct list_head *list; + drm_map_list_t *r_list; + drm_map_t *map; + + list_for_each(list, &dev->maplist->head) { + r_list = (drm_map_list_t *) list; + map = r_list->map; + if (!map) + continue; + if (map->offset <= offset && (offset + size) <= (map->offset + map->size)) + return map; + } + return NULL; +} + +static inline void * +agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev) +{ + unsigned long *phys_addr_map, i, num_pages = PAGE_ALIGN(size) / PAGE_SIZE; + struct page **page_map, **page_map_ptr; + struct drm_agp_mem *agpmem; + struct vm_struct *area; + + + size = PAGE_ALIGN(size); + + for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) + if (agpmem->bound <= offset + && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= (offset + size)) + break; + if (!agpmem) + return NULL; + + /* + * OK, we're mapping AGP space on a chipset/platform on which memory accesses by + * the CPU do not get remapped by the GART. 
We fix this by using the kernel's + * page-table instead (that's probably faster anyhow...). + */ + area = get_vm_area(size, VM_IOREMAP); + if (!area) + return NULL; + + flush_cache_all(); + + /* note: use vmalloc() because num_pages could be large... */ + page_map = vmalloc(num_pages * sizeof(struct page *)); + if (!page_map) + return NULL; + + phys_addr_map = agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE; + for (i = 0; i < num_pages; ++i) + page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT); + page_map_ptr = page_map; + if (map_vm_area(area, PAGE_AGP, &page_map_ptr) < 0) { + vunmap(area->addr); + vfree(page_map); + return NULL; + } + vfree(page_map); + + flush_tlb_kernel_range(area->addr, area->addr + size); + return area->addr; +} + +static inline unsigned long +drm_follow_page (void *vaddr) +{ +printk("drm_follow_page: vaddr=%p\n", vaddr); + pgd_t *pgd = pgd_offset_k((unsigned long) vaddr); +printk(" pgd=%p\n", pgd); + pmd_t *pmd = pmd_offset(pgd, (unsigned long) vaddr); +printk(" pmd=%p\n", pmd); + pte_t *ptep = pte_offset_kernel(pmd, (unsigned long) vaddr); +printk(" ptep=%p\n", ptep); +printk(" page=0x%lx\n", pte_pfn(*ptep) << PAGE_SHIFT); + return pte_pfn(*ptep) << PAGE_SHIFT; +} + +#else /* !__REALLY_HAVE_AGP */ + +static inline void * +agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev) { return NULL; } + +#endif /* !__REALLY_HAVE_AGP */ + +void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev) +{ + int remap_aperture = 0; void *pt; if (!size) { - DRM_MEM_ERROR(DRM_MEM_MAPPINGS, - "Mapping 0 bytes at 0x%08lx\n", offset); + DRM_MEM_ERROR(DRM_MEM_MAPPINGS, "Mapping 0 bytes at 0x%08lx\n", offset); return NULL; } - if (!(pt = ioremap(offset, size))) { +#if __REALLY_HAVE_AGP + if (dev->agp->cant_use_aperture) { + drm_map_t *map = drm_lookup_map(offset, size, dev); + + if (map && map->type == _DRM_AGP) + remap_aperture = 1; + } +#endif + if (remap_aperture) + pt = agp_remap(offset, size, dev); + else + pt = ioremap(offset, size); + if (!pt) { spin_lock(&DRM(mem_lock)); ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count; spin_unlock(&DRM(mem_lock)); @@ -314,8 +423,9 @@ return pt; } -void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size) +void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_t *dev) { + int remap_aperture = 0; void *pt; if (!size) { @@ -324,7 +434,19 @@ return NULL; } - if (!(pt = ioremap_nocache(offset, size))) { +#if __REALLY_HAVE_AGP + if (dev->agp->cant_use_aperture) { + drm_map_t *map = drm_lookup_map(offset, size, dev); + + if (map && map->type == _DRM_AGP) + remap_aperture = 1; + } +#endif + if (remap_aperture) + pt = agp_remap(offset, size, dev); + else + pt = ioremap_nocache(offset, size); + if (!pt) { spin_lock(&DRM(mem_lock)); ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count; spin_unlock(&DRM(mem_lock)); @@ -337,16 +459,40 @@ return pt; } -void DRM(ioremapfree)(void *pt, unsigned long size) +void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev) { int alloc_count; int free_count; +printk("ioremapfree(pt=%p)\n", pt); if (!pt) DRM_MEM_ERROR(DRM_MEM_MAPPINGS, "Attempt to free NULL pointer\n"); - else - iounmap(pt); + else { + int unmap_aperture = 0; +#if __REALLY_HAVE_AGP + /* + * This is rather ugly. It would be much cleaner if the DRM API would use + * separate routines for handling mappings in the AGP space. Hopefully this + * can be done in a future revision of the interface... 
+ */ + if (dev->agp->cant_use_aperture + && ((unsigned long) pt >= VMALLOC_START && (unsigned long) pt < VMALLOC_END)) + { + unsigned long offset = (drm_follow_page(pt) + | ((unsigned long) pt & ~PAGE_MASK)); +printk("offset=0x%lx\n", offset); + drm_map_t *map = drm_lookup_map(offset, size, dev); +printk("map=%p\n", map); + if (map && map->type == _DRM_AGP) + unmap_aperture = 1; + } +#endif + if (unmap_aperture) + vunmap(pt); + else + iounmap(pt); + } spin_lock(&DRM(mem_lock)); DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size; diff -Nru a/drivers/char/drm/drm_vm.h b/drivers/char/drm/drm_vm.h --- a/drivers/char/drm/drm_vm.h Wed Feb 12 14:47:58 2003 +++ b/drivers/char/drm/drm_vm.h Wed Feb 12 14:47:58 2003 @@ -108,12 +108,12 @@ * Get the page, inc the use count, and return it */ offset = (baddr - agpmem->bound) >> PAGE_SHIFT; - agpmem->memory->memory[offset] &= dev->agp->page_mask; page = virt_to_page(__va(agpmem->memory->memory[offset])); get_page(page); - DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx\n", - baddr, __va(agpmem->memory->memory[offset]), offset); + DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n", + baddr, __va(agpmem->memory->memory[offset]), offset, + atomic_read(&page->count)); return page; } @@ -207,7 +207,7 @@ DRM_DEBUG("mtrr_del = %d\n", retcode); } #endif - DRM(ioremapfree)(map->handle, map->size); + DRM(ioremapfree)(map->handle, map->size, dev); break; case _DRM_SHM: vfree(map->handle); @@ -421,15 +421,16 @@ switch (map->type) { case _DRM_AGP: -#if defined(__alpha__) +#if __REALLY_HAVE_AGP + if (dev->agp->cant_use_aperture) { /* - * On Alpha we can't talk to bus dma address from the - * CPU, so for memory of type DRM_AGP, we'll deal with - * sorting out the real physical pages and mappings - * in nopage() + * On some platforms we can't talk to bus dma address from the CPU, so for + * memory of type DRM_AGP, we'll deal with sorting out the real physical + * pages and mappings in nopage() */ vma->vm_ops = &DRM(vm_ops); break; + } #endif /* fall through to _DRM_FRAME_BUFFER... 
*/ case _DRM_FRAME_BUFFER: @@ -440,15 +441,15 @@ pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT; } -#elif defined(__ia64__) - if (map->type != _DRM_AGP) - vma->vm_page_prot = - pgprot_writecombine(vma->vm_page_prot); #elif defined(__powerpc__) pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED; #endif vma->vm_flags |= VM_IO; /* not in core dump */ } +#if defined(__ia64__) + if (map->type != _DRM_AGP) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); +#endif offset = DRIVER_GET_REG_OFS(); #ifdef __sparc__ if (io_remap_page_range(DRM_RPR_ARG(vma) vma->vm_start, diff -Nru a/drivers/char/drm/gamma_dma.c b/drivers/char/drm/gamma_dma.c --- a/drivers/char/drm/gamma_dma.c Wed Feb 12 14:47:58 2003 +++ b/drivers/char/drm/gamma_dma.c Wed Feb 12 14:47:58 2003 @@ -637,7 +637,7 @@ } else { DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset ); - DRM_IOREMAP( dev_priv->buffers ); + DRM_IOREMAP( dev_priv->buffers, dev ); buf = dma->buflist[GLINT_DRI_BUF_COUNT]; pgt = buf->address; @@ -667,7 +667,7 @@ if ( dev->dev_private ) { drm_gamma_private_t *dev_priv = dev->dev_private; - DRM_IOREMAPFREE( dev_priv->buffers ); + DRM_IOREMAPFREE( dev_priv->buffers, dev ); DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t), DRM_MEM_DRIVER ); diff -Nru a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c --- a/drivers/char/drm/i810_dma.c Wed Feb 12 14:47:57 2003 +++ b/drivers/char/drm/i810_dma.c Wed Feb 12 14:47:57 2003 @@ -275,7 +275,7 @@ if(dev_priv->ring.virtual_start) { DRM(ioremapfree)((void *) dev_priv->ring.virtual_start, - dev_priv->ring.Size); + dev_priv->ring.Size, dev); } if(dev_priv->hw_status_page != 0UL) { pci_free_consistent(dev->pdev, PAGE_SIZE, @@ -291,7 +291,7 @@ for (i = 0; i < dma->buf_count; i++) { drm_buf_t *buf = dma->buflist[ i ]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; - DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total); + DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev); } } return 0; @@ -361,7 +361,7 @@ *buf_priv->in_use = I810_BUF_FREE; buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address, - buf->total); + buf->total, dev); } return 0; } @@ -414,7 +414,7 @@ dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base + init->ring_start, - init->ring_size); + init->ring_size, dev); if (dev_priv->ring.virtual_start == NULL) { dev->dev_private = (void *) dev_priv; diff -Nru a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c --- a/drivers/char/drm/i830_dma.c Wed Feb 12 14:47:57 2003 +++ b/drivers/char/drm/i830_dma.c Wed Feb 12 14:47:57 2003 @@ -283,7 +283,7 @@ if(dev_priv->ring.virtual_start) { DRM(ioremapfree)((void *) dev_priv->ring.virtual_start, - dev_priv->ring.Size); + dev_priv->ring.Size, dev); } if(dev_priv->hw_status_page != 0UL) { pci_free_consistent(dev->pdev, PAGE_SIZE, @@ -299,7 +299,7 @@ for (i = 0; i < dma->buf_count; i++) { drm_buf_t *buf = dma->buflist[ i ]; drm_i830_buf_priv_t *buf_priv = buf->dev_private; - DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total); + DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev); } } return 0; @@ -371,7 +371,7 @@ *buf_priv->in_use = I830_BUF_FREE; buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address, - buf->total); + buf->total, dev); } return 0; } @@ -425,7 +425,7 @@ dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base + init->ring_start, - init->ring_size); + init->ring_size, dev); if (dev_priv->ring.virtual_start == NULL) { dev->dev_private = (void *) dev_priv; diff -Nru a/drivers/char/drm/mga_dma.c 
b/drivers/char/drm/mga_dma.c --- a/drivers/char/drm/mga_dma.c Wed Feb 12 14:47:58 2003 +++ b/drivers/char/drm/mga_dma.c Wed Feb 12 14:47:58 2003 @@ -554,9 +554,9 @@ (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle + init->sarea_priv_offset); - DRM_IOREMAP( dev_priv->warp ); - DRM_IOREMAP( dev_priv->primary ); - DRM_IOREMAP( dev_priv->buffers ); + DRM_IOREMAP( dev_priv->warp, dev ); + DRM_IOREMAP( dev_priv->primary, dev ); + DRM_IOREMAP( dev_priv->buffers, dev ); if(!dev_priv->warp->handle || !dev_priv->primary->handle || @@ -642,9 +642,9 @@ if ( dev->dev_private ) { drm_mga_private_t *dev_priv = dev->dev_private; - DRM_IOREMAPFREE( dev_priv->warp ); - DRM_IOREMAPFREE( dev_priv->primary ); - DRM_IOREMAPFREE( dev_priv->buffers ); + DRM_IOREMAPFREE( dev_priv->warp, dev ); + DRM_IOREMAPFREE( dev_priv->primary, dev ); + DRM_IOREMAPFREE( dev_priv->buffers, dev ); if ( dev_priv->head != NULL ) { mga_freelist_cleanup( dev ); diff -Nru a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h --- a/drivers/char/drm/mga_drv.h Wed Feb 12 14:47:58 2003 +++ b/drivers/char/drm/mga_drv.h Wed Feb 12 14:47:58 2003 @@ -238,7 +238,7 @@ if ( MGA_VERBOSE ) { \ DRM_INFO( "BEGIN_DMA( %d ) in %s\n", \ (n), __FUNCTION__ ); \ - DRM_INFO( " space=0x%x req=0x%x\n", \ + DRM_INFO( " space=0x%x req=0x%Zx\n", \ dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \ } \ prim = dev_priv->prim.start; \ @@ -288,7 +288,7 @@ #define DMA_WRITE( offset, val ) \ do { \ if ( MGA_VERBOSE ) { \ - DRM_INFO( " DMA_WRITE( 0x%08x ) at 0x%04x\n", \ + DRM_INFO( " DMA_WRITE( 0x%08x ) at 0x%04Zx\n", \ (u32)(val), write + (offset) * sizeof(u32) ); \ } \ *(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \ diff -Nru a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c --- a/drivers/char/drm/r128_cce.c Wed Feb 12 14:47:57 2003 +++ b/drivers/char/drm/r128_cce.c Wed Feb 12 14:47:57 2003 @@ -350,8 +350,8 @@ R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR, entry->busaddr[page_ofs]); - DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n", - entry->busaddr[page_ofs], + DRM_DEBUG( "ring rptr: offset=0x%08lx handle=0x%08lx\n", + (unsigned long) entry->busaddr[page_ofs], entry->handle + tmp_ofs ); } @@ -540,9 +540,9 @@ init->sarea_priv_offset); if ( !dev_priv->is_pci ) { - DRM_IOREMAP( dev_priv->cce_ring ); - DRM_IOREMAP( dev_priv->ring_rptr ); - DRM_IOREMAP( dev_priv->buffers ); + DRM_IOREMAP( dev_priv->cce_ring, dev ); + DRM_IOREMAP( dev_priv->ring_rptr, dev ); + DRM_IOREMAP( dev_priv->buffers, dev ); if(!dev_priv->cce_ring->handle || !dev_priv->ring_rptr->handle || !dev_priv->buffers->handle) { @@ -618,9 +618,9 @@ #if __REALLY_HAVE_SG if ( !dev_priv->is_pci ) { #endif - DRM_IOREMAPFREE( dev_priv->cce_ring ); - DRM_IOREMAPFREE( dev_priv->ring_rptr ); - DRM_IOREMAPFREE( dev_priv->buffers ); + DRM_IOREMAPFREE( dev_priv->cce_ring, dev ); + DRM_IOREMAPFREE( dev_priv->ring_rptr, dev ); + DRM_IOREMAPFREE( dev_priv->buffers, dev ); #if __REALLY_HAVE_SG } else { if (!DRM(ati_pcigart_cleanup)( dev, diff -Nru a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c --- a/drivers/char/drm/radeon_cp.c Wed Feb 12 14:47:57 2003 +++ b/drivers/char/drm/radeon_cp.c Wed Feb 12 14:47:57 2003 @@ -904,8 +904,8 @@ RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]); - DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n", - entry->busaddr[page_ofs], + DRM_DEBUG( "ring rptr: offset=0x%08lx handle=0x%08lx\n", + (unsigned long) entry->busaddr[page_ofs], entry->handle + tmp_ofs ); } @@ -1157,9 +1157,9 @@ init->sarea_priv_offset); if ( 
!dev_priv->is_pci ) { - DRM_IOREMAP( dev_priv->cp_ring ); - DRM_IOREMAP( dev_priv->ring_rptr ); - DRM_IOREMAP( dev_priv->buffers ); + DRM_IOREMAP( dev_priv->cp_ring, dev ); + DRM_IOREMAP( dev_priv->ring_rptr, dev ); + DRM_IOREMAP( dev_priv->buffers, dev ); if(!dev_priv->cp_ring->handle || !dev_priv->ring_rptr->handle || !dev_priv->buffers->handle) { @@ -1278,9 +1278,9 @@ drm_radeon_private_t *dev_priv = dev->dev_private; if ( !dev_priv->is_pci ) { - DRM_IOREMAPFREE( dev_priv->cp_ring ); - DRM_IOREMAPFREE( dev_priv->ring_rptr ); - DRM_IOREMAPFREE( dev_priv->buffers ); + DRM_IOREMAPFREE( dev_priv->cp_ring, dev ); + DRM_IOREMAPFREE( dev_priv->ring_rptr, dev ); + DRM_IOREMAPFREE( dev_priv->buffers, dev ); } else { #if __REALLY_HAVE_SG if (!DRM(ati_pcigart_cleanup)( dev, diff -Nru a/drivers/char/mem.c b/drivers/char/mem.c --- a/drivers/char/mem.c Wed Feb 12 14:47:58 2003 +++ b/drivers/char/mem.c Wed Feb 12 14:47:58 2003 @@ -528,10 +528,12 @@ case 0: file->f_pos = offset; ret = file->f_pos; + force_successful_syscall_return(); break; case 1: file->f_pos += offset; ret = file->f_pos; + force_successful_syscall_return(); break; default: ret = -EINVAL; diff -Nru a/drivers/char/rio/rioroute.c b/drivers/char/rio/rioroute.c --- a/drivers/char/rio/rioroute.c Wed Feb 12 14:47:58 2003 +++ b/drivers/char/rio/rioroute.c Wed Feb 12 14:47:58 2003 @@ -763,7 +763,7 @@ #endif UnitId--; /* this trick relies on the Unit Id being UNSIGNED! */ - if ( UnitId > MAX_RUP ) /* dontcha just lurv unsigned maths! */ + if ( UnitId >= MAX_RUP ) /* dontcha just lurv unsigned maths! */ return(0); if ( HostP->Mapping[UnitId].Flags & BEEN_HERE ) diff -Nru a/drivers/char/specialix.c b/drivers/char/specialix.c --- a/drivers/char/specialix.c Wed Feb 12 14:47:58 2003 +++ b/drivers/char/specialix.c Wed Feb 12 14:47:58 2003 @@ -1456,7 +1456,7 @@ board = SX_BOARD(minor(tty->device)); - if (board > SX_NBOARD || !(sx_board[board].flags & SX_BOARD_PRESENT)) + if (board >= SX_NBOARD || !(sx_board[board].flags & SX_BOARD_PRESENT)) return -ENODEV; bp = &sx_board[board]; @@ -2363,7 +2363,7 @@ struct pci_dev *pdev = NULL; i=0; - while (i <= SX_NBOARD) { + while (i < SX_NBOARD) { if (sx_board[i].flags & SX_BOARD_PRESENT) { i++; continue; diff -Nru a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile --- a/drivers/media/radio/Makefile Wed Feb 12 14:47:58 2003 +++ b/drivers/media/radio/Makefile Wed Feb 12 14:47:58 2003 @@ -2,6 +2,8 @@ # Makefile for the kernel character device drivers. # +obj-y := dummy.o + miropcm20-objs := miropcm20-rds-core.o miropcm20-radio.o obj-$(CONFIG_RADIO_AZTECH) += radio-aztech.o diff -Nru a/drivers/media/radio/dummy.c b/drivers/media/radio/dummy.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/media/radio/dummy.c Wed Feb 12 14:47:58 2003 @@ -0,0 +1 @@ +/* just so the linker knows what kind of object files it's deadling with... */ diff -Nru a/drivers/media/video/Makefile b/drivers/media/video/Makefile --- a/drivers/media/video/Makefile Wed Feb 12 14:47:57 2003 +++ b/drivers/media/video/Makefile Wed Feb 12 14:47:57 2003 @@ -6,6 +6,8 @@ bttv-risc.o bttv-vbi.o zoran-objs := zr36120.o zr36120_i2c.o zr36120_mem.o +obj-y := dummy.o + obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o v4l1-compat.o obj-$(CONFIG_VIDEO_BT848) += bttv.o msp3400.o tvaudio.o \ diff -Nru a/drivers/media/video/dummy.c b/drivers/media/video/dummy.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/media/video/dummy.c Wed Feb 12 14:47:58 2003 @@ -0,0 +1 @@ +/* just so the linker knows what kind of object files it's deadling with... 
*/ diff -Nru a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c --- a/drivers/net/tulip/de4x5.c Wed Feb 12 14:47:57 2003 +++ b/drivers/net/tulip/de4x5.c Wed Feb 12 14:47:57 2003 @@ -440,8 +440,6 @@ ========================================================================= */ -static char version[] __initdata = "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n"; - #include #include #include @@ -476,6 +474,8 @@ #endif /* CONFIG_PPC */ #include "de4x5.h" + +static char version[] __initdata = "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n"; #define c_char const char #define TWIDDLE(a) (u_short)le16_to_cpu(get_unaligned((u_short *)(a))) diff -Nru a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c --- a/drivers/net/tulip/media.c Wed Feb 12 14:47:57 2003 +++ b/drivers/net/tulip/media.c Wed Feb 12 14:47:57 2003 @@ -278,6 +278,10 @@ for (i = 0; i < init_length; i++) outl(init_sequence[i], ioaddr + CSR12); } + + (void) inl(ioaddr + CSR6); /* flush CSR12 writes */ + udelay(500); /* Give MII time to recover */ + tmp_info = get_u16(&misc_info[1]); if (tmp_info) tp->advertising[phy_num] = tmp_info | 1; diff -Nru a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c --- a/drivers/scsi/megaraid.c Wed Feb 12 14:47:57 2003 +++ b/drivers/scsi/megaraid.c Wed Feb 12 14:47:57 2003 @@ -2045,7 +2045,7 @@ return; mbox = (mega_mailbox *) pScb->mboxData; - printk ("%u cmd:%x id:%x #scts:%x lba:%x addr:%x logdrv:%x #sg:%x\n", + printk ("%lu cmd:%x id:%x #scts:%x lba:%x addr:%x logdrv:%x #sg:%x\n", pScb->SCpnt->pid, mbox->cmd, mbox->cmdid, mbox->numsectors, mbox->lba, mbox->xferaddr, mbox->logdrv, mbox->numsgelements); @@ -3351,9 +3351,13 @@ mbox[0] = IS_BIOS_ENABLED; mbox[2] = GET_BIOS; - mboxpnt->xferaddr = virt_to_bus ((void *) megacfg->mega_buffer); + mboxpnt->xferaddr = pci_map_single(megacfg->dev, + (void *) megacfg->mega_buffer, (2 * 1024L), + PCI_DMA_FROMDEVICE); ret = megaIssueCmd (megacfg, mbox, NULL, 0); + + pci_unmap_single(megacfg->dev, mboxpnt->xferaddr, 2 * 1024L, PCI_DMA_FROMDEVICE); return (*(char *) megacfg->mega_buffer); } diff -Nru a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c --- a/drivers/scsi/scsi_ioctl.c Wed Feb 12 14:47:58 2003 +++ b/drivers/scsi/scsi_ioctl.c Wed Feb 12 14:47:58 2003 @@ -219,6 +219,9 @@ unsigned int needed, buf_needed; int timeout, retries, result; int data_direction, gfp_mask = GFP_KERNEL; +#if __GNUC__ < 3 + int foo; +#endif if (!sic) return -EINVAL; @@ -232,11 +235,21 @@ if (verify_area(VERIFY_READ, sic, sizeof(Scsi_Ioctl_Command))) return -EFAULT; +#if __GNUC__ < 3 + foo = __get_user(inlen, &sic->inlen); + if (foo) + return -EFAULT; + + foo = __get_user(outlen, &sic->outlen); + if (foo) + return -EFAULT; +#else if(__get_user(inlen, &sic->inlen)) return -EFAULT; if(__get_user(outlen, &sic->outlen)) return -EFAULT; +#endif /* * We do not transfer more than MAX_BUF with this interface. 
diff -Nru a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c Wed Feb 12 14:47:58 2003
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c Wed Feb 12 14:47:58 2003
@@ -295,11 +295,7 @@
#ifndef SYM_LINUX_DYNAMIC_DMA_MAPPING
typedef u_long bus_addr_t;
#else
-#if SYM_CONF_DMA_ADDRESSING_MODE > 0
-typedef dma64_addr_t bus_addr_t;
-#else
typedef dma_addr_t bus_addr_t;
-#endif
#endif
/*
diff -Nru a/drivers/scsi/sym53c8xx_2/sym_malloc.c b/drivers/scsi/sym53c8xx_2/sym_malloc.c
--- a/drivers/scsi/sym53c8xx_2/sym_malloc.c Wed Feb 12 14:47:58 2003
+++ b/drivers/scsi/sym53c8xx_2/sym_malloc.c Wed Feb 12 14:47:58 2003
@@ -143,12 +143,14 @@
a = (m_addr_t) ptr;
while (1) {
-#ifdef SYM_MEM_FREE_UNUSED
if (s == SYM_MEM_CLUSTER_SIZE) {
+#ifdef SYM_MEM_FREE_UNUSED
M_FREE_MEM_CLUSTER(a);
- break;
- }
+#else
+ ((m_link_p) a)->next = h[i].next;
+ h[i].next = (m_link_p) a;
#endif
+ }
b = a ^ s;
q = &h[i];
while (q->next && q->next != (m_link_p) b) {
diff -Nru a/drivers/serial/8250.c b/drivers/serial/8250.c
--- a/drivers/serial/8250.c Wed Feb 12 14:47:57 2003
+++ b/drivers/serial/8250.c Wed Feb 12 14:47:57 2003
@@ -1999,9 +1999,11 @@
return __register_serial(req, -1);
}
-int __init early_serial_setup(struct serial_struct *req)
+int __init early_serial_setup(struct uart_port *port)
{
- __register_serial(req, req->line);
+ serial8250_isa_init_ports();
+ serial8250_ports[port->line].port = *port;
+ serial8250_ports[port->line].port.ops = &serial8250_pops;
return 0;
}
diff -Nru a/drivers/serial/8250_acpi.c b/drivers/serial/8250_acpi.c
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/drivers/serial/8250_acpi.c Wed Feb 12 14:47:58 2003
@@ -0,0 +1,178 @@
+/*
+ * linux/drivers/char/acpi_serial.c
+ *
+ * Copyright (C) 2000, 2002 Hewlett-Packard Co.
+ * Khalid Aziz
+ *
+ * Detect and initialize the headless console serial port defined in SPCR table and debug
+ * serial port defined in DBGP table.
+ *
+ * 2002/08/29 davidm Adjust it to new 2.5 serial driver infrastructure.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#undef SERIAL_DEBUG_ACPI
+
+#define ACPI_SERIAL_CONSOLE_PORT 0
+#define ACPI_SERIAL_DEBUG_PORT 5
+
+/*
+ * Query ACPI tables for a debug and a headless console serial port. If found, add them to
+ * rs_table[]. A pointer to either SPCR or DBGP table is passed as parameter. This
+ * function should be called before serial_console_init() is called to make sure the SPCR
+ * serial console will be available for use. IA-64 kernel calls this function from within
+ * acpi.c when it encounters SPCR or DBGP tables as it parses the ACPI 2.0 tables during
+ * bootup.
+ */
+void __init
+setup_serial_acpi (void *tablep)
+{
+ acpi_ser_t *acpi_ser_p;
+ struct uart_port port;
+ unsigned long iobase;
+ int gsi;
+
+#ifdef SERIAL_DEBUG_ACPI
+ printk("Entering setup_serial_acpi()\n");
+#endif
+
+ /* Now get the table */
+ if (!tablep)
+ return;
+
+ memset(&port, 0, sizeof(port));
+
+ acpi_ser_p = (acpi_ser_t *) tablep;
+
+ /*
+ * Perform a sanity check on the table. Table should have a signature of "SPCR" or
+ * "DBGP" and it should be at least 52 bytes long.
+ */ + if (strncmp(acpi_ser_p->signature, ACPI_SPCRT_SIGNATURE, ACPI_SIG_LEN) != 0 && + strncmp(acpi_ser_p->signature, ACPI_DBGPT_SIGNATURE, ACPI_SIG_LEN) != 0) + return; + if (acpi_ser_p->length < 52) + return; + + iobase = (((u64) acpi_ser_p->base_addr.addrh) << 32) | acpi_ser_p->base_addr.addrl; + gsi = ( (acpi_ser_p->global_int[3] << 24) | (acpi_ser_p->global_int[2] << 16) + | (acpi_ser_p->global_int[1] << 8) | (acpi_ser_p->global_int[0] << 0)); + +#ifdef SERIAL_DEBUG_ACPI + printk("setup_serial_acpi(): table pointer = 0x%p\n", acpi_ser_p); + printk(" sig = '%c%c%c%c'\n", acpi_ser_p->signature[0], + acpi_ser_p->signature[1], acpi_ser_p->signature[2], acpi_ser_p->signature[3]); + printk(" length = %d\n", acpi_ser_p->length); + printk(" Rev = %d\n", acpi_ser_p->rev); + printk(" Interface type = %d\n", acpi_ser_p->intfc_type); + printk(" Base address = 0x%lX\n", iobase); + printk(" IRQ = %d\n", acpi_ser_p->irq); + printk(" Global System Int = %d\n", gsi); + printk(" Baud rate = "); + switch (acpi_ser_p->baud) { + case ACPI_SERIAL_BAUD_9600: + printk("9600\n"); + break; + + case ACPI_SERIAL_BAUD_19200: + printk("19200\n"); + break; + + case ACPI_SERIAL_BAUD_57600: + printk("57600\n"); + break; + + case ACPI_SERIAL_BAUD_115200: + printk("115200\n"); + break; + + default: + printk("Huh (%d)\n", acpi_ser_p->baud); + break; + } + if (acpi_ser_p->base_addr.space_id == ACPI_SERIAL_PCICONF_SPACE) { + printk(" PCI serial port:\n"); + printk(" Bus %d, Device %d, Vendor ID 0x%x, Dev ID 0x%x\n", + acpi_ser_p->pci_bus, acpi_ser_p->pci_dev, + acpi_ser_p->pci_vendor_id, acpi_ser_p->pci_dev_id); + } +#endif + /* + * Now build a serial_req structure to update the entry in rs_table for the + * headless console port. + */ + switch (acpi_ser_p->intfc_type) { + case ACPI_SERIAL_INTFC_16550: + port.type = PORT_16550; + port.uartclk = BASE_BAUD * 16; + break; + + case ACPI_SERIAL_INTFC_16450: + port.type = PORT_16450; + port.uartclk = BASE_BAUD * 16; + break; + + default: + port.type = PORT_UNKNOWN; + break; + } + if (strncmp(acpi_ser_p->signature, ACPI_SPCRT_SIGNATURE, ACPI_SIG_LEN) == 0) + port.line = ACPI_SERIAL_CONSOLE_PORT; + else if (strncmp(acpi_ser_p->signature, ACPI_DBGPT_SIGNATURE, ACPI_SIG_LEN) == 0) + port.line = ACPI_SERIAL_DEBUG_PORT; + /* + * Check if this is an I/O mapped address or a memory mapped address + */ + if (acpi_ser_p->base_addr.space_id == ACPI_SERIAL_MEM_SPACE) { + port.iobase = 0; + port.mapbase = iobase; + port.membase = ioremap(iobase, 64); + port.iotype = SERIAL_IO_MEM; + } else if (acpi_ser_p->base_addr.space_id == ACPI_SERIAL_IO_SPACE) { + port.iobase = iobase; + port.mapbase = 0; + port.membase = NULL; + port.iotype = SERIAL_IO_PORT; + } else if (acpi_ser_p->base_addr.space_id == ACPI_SERIAL_PCICONF_SPACE) { + printk("WARNING: No support for PCI serial console\n"); + return; + } + + /* + * If the table does not have IRQ information, use 0 for IRQ. This will force + * rs_init() to probe for IRQ. + */ + if (acpi_ser_p->length < 53) + port.irq = 0; + else { + port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_AUTO_IRQ; + if (acpi_ser_p->int_type & (ACPI_SERIAL_INT_APIC | ACPI_SERIAL_INT_SAPIC)) + port.irq = gsi; + else if (acpi_ser_p->int_type & ACPI_SERIAL_INT_PCAT) + port.irq = acpi_ser_p->irq; + else + /* + * IRQ type not being set would mean UART will run in polling + * mode. Do not probe for IRQ in that case. 
+ */ + port.flags &= UPF_AUTO_IRQ; + } + if (early_serial_setup(&port) < 0) { + printk("early_serial_setup() for ACPI serial console port failed\n"); + return; + } + +#ifdef SERIAL_DEBUG_ACPI + printk("Leaving setup_serial_acpi()\n"); +#endif +} diff -Nru a/drivers/serial/8250_hcdp.c b/drivers/serial/8250_hcdp.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/serial/8250_hcdp.c Wed Feb 12 14:47:58 2003 @@ -0,0 +1,215 @@ +/* + * linux/drivers/char/hcdp_serial.c + * + * Copyright (C) 2002 Hewlett-Packard Co. + * Khalid Aziz + * + * Parse the EFI HCDP table to locate serial console and debug ports and initialize them. + * + * 2002/08/29 davidm Adjust it to new 2.5 serial driver infrastructure (untested). + */ +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "8250_hcdp.h" + +#undef SERIAL_DEBUG_HCDP + +/* + * Parse the HCDP table to find descriptions for headless console and debug serial ports + * and add them to rs_table[]. A pointer to HCDP table is passed as parameter. This + * function should be called before serial_console_init() is called to make sure the HCDP + * serial console will be available for use. IA-64 kernel calls this function from + * setup_arch() after the EFI and ACPI tables have been parsed. + */ +void __init +setup_serial_hcdp (void *tablep) +{ + hcdp_dev_t *hcdp_dev; + struct uart_port port; + unsigned long iobase; + hcdp_t hcdp; + int gsi, nr; +#if 0 + static int shift_once = 1; +#endif + +#ifdef SERIAL_DEBUG_HCDP + printk("Entering setup_serial_hcdp()\n"); +#endif + + /* Verify we have a valid table pointer */ + if (!tablep) + return; + + memset(&port, 0, sizeof(port)); + + /* + * Don't trust firmware to give us a table starting at an aligned address. Make a + * local copy of the HCDP table with aligned structures. + */ + memcpy(&hcdp, tablep, sizeof(hcdp)); + + /* + * Perform a sanity check on the table. Table should have a signature of "HCDP" + * and it should be atleast 82 bytes long to have any useful information. + */ + if ((strncmp(hcdp.signature, HCDP_SIGNATURE, HCDP_SIG_LEN) != 0)) + return; + if (hcdp.len < 82) + return; + +#ifdef SERIAL_DEBUG_HCDP + printk("setup_serial_hcdp(): table pointer = 0x%p, sig = '%.4s'\n", + tablep, hcdp.signature); + printk(" length = %d, rev = %d, ", hcdp.len, hcdp.rev); + printk("OEM ID = %.6s, # of entries = %d\n", hcdp.oemid, hcdp.num_entries); +#endif + + /* + * Parse each device entry + */ + for (nr = 0; nr < hcdp.num_entries; nr++) { + hcdp_dev = hcdp.hcdp_dev + nr; + /* + * We will parse only the primary console device which is the first entry + * for these devices. 
We will ignore rest of the entries for the same type + * device that has already been parsed and initialized + */ + if (hcdp_dev->type != HCDP_DEV_CONSOLE) + continue; + + iobase = ((u64) hcdp_dev->base_addr.addrhi << 32) | hcdp_dev->base_addr.addrlo; + gsi = hcdp_dev->global_int; + + /* See PCI spec v2.2, Appendix D (Class Codes): */ + switch (hcdp_dev->pci_prog_intfc) { + case 0x00: port.type = PORT_8250; break; + case 0x01: port.type = PORT_16450; break; + case 0x02: port.type = PORT_16550; break; + case 0x03: port.type = PORT_16650; break; + case 0x04: port.type = PORT_16750; break; + case 0x05: port.type = PORT_16850; break; + case 0x06: port.type = PORT_16C950; break; + default: + printk(KERN_WARNING"warning: EFI HCDP table reports unknown serial " + "programming interface 0x%02x; will autoprobe.\n", + hcdp_dev->pci_prog_intfc); + port.type = PORT_UNKNOWN; + break; + } + +#ifdef SERIAL_DEBUG_HCDP + printk(" type = %s, uart = %d\n", ((hcdp_dev->type == HCDP_DEV_CONSOLE) + ? "Headless Console" : ((hcdp_dev->type == HCDP_DEV_DEBUG) + ? "Debug port" : "Huh????")), + port.type); + printk(" base address space = %s, base address = 0x%lx\n", + ((hcdp_dev->base_addr.space_id == ACPI_MEM_SPACE) + ? "Memory Space" : ((hcdp_dev->base_addr.space_id == ACPI_IO_SPACE) + ? "I/O space" : "PCI space")), + iobase); + printk(" gsi = %d, baud rate = %lu, bits = %d, clock = %d\n", + gsi, (unsigned long) hcdp_dev->baud, hcdp_dev->bits, hcdp_dev->clock_rate); + if (hcdp_dev->base_addr.space_id == ACPI_PCICONF_SPACE) + printk(" PCI id: %02x:%02x:%02x, vendor ID=0x%x, dev ID=0x%x\n", + hcdp_dev->pci_seg, hcdp_dev->pci_bus, hcdp_dev->pci_dev, + hcdp_dev->pci_vendor_id, hcdp_dev->pci_dev_id); +#endif + /* + * Now fill in a port structure to update the 8250 port table.. + */ + if (hcdp_dev->clock_rate) + port.uartclk = hcdp_dev->clock_rate; + else + port.uartclk = BASE_BAUD * 16; + + /* + * Check if this is an I/O mapped address or a memory mapped address + */ + if (hcdp_dev->base_addr.space_id == ACPI_MEM_SPACE) { + port.iobase = 0; + port.mapbase = iobase; + port.membase = ioremap(iobase, 64); + port.iotype = SERIAL_IO_MEM; + } else if (hcdp_dev->base_addr.space_id == ACPI_IO_SPACE) { + port.iobase = iobase; + port.mapbase = 0; + port.membase = NULL; + port.iotype = SERIAL_IO_PORT; + } else if (hcdp_dev->base_addr.space_id == ACPI_PCICONF_SPACE) { + printk(KERN_WARNING"warning: No support for PCI serial console\n"); + return; + } + port.irq = gsi; + port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF; + if (gsi) + port.flags |= ASYNC_AUTO_IRQ; + + /* + * Note: the above memset() initializes port.line to 0, so we register + * this port as ttyS0. + */ + if (early_serial_setup(&port) < 0) { + printk("setup_serial_hcdp(): early_serial_setup() for HCDP serial " + "console port failed. 
Will try any additional consoles in HCDP.\n"); + continue; + } + break; + } + +#ifdef SERIAL_DEBUG_HCDP + printk("Leaving setup_serial_hcdp()\n"); +#endif +} + +#ifdef CONFIG_IA64_EARLY_PRINTK_UART +unsigned long +hcdp_early_uart (void) +{ + efi_system_table_t *systab; + efi_config_table_t *config_tables; + unsigned long addr = 0; + hcdp_t *hcdp = 0; + hcdp_dev_t *dev; + int i; + + systab = (efi_system_table_t *) ia64_boot_param->efi_systab; + if (!systab) + return 0; + systab = __va(systab); + + config_tables = (efi_config_table_t *) systab->tables; + if (!config_tables) + return 0; + config_tables = __va(config_tables); + + for (i = 0; i < systab->nr_tables; i++) { + if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) { + hcdp = (hcdp_t *) config_tables[i].table; + break; + } + } + if (!hcdp) + return 0; + hcdp = __va(hcdp); + + for (i = 0, dev = hcdp->hcdp_dev; i < hcdp->num_entries; i++, dev++) { + if (dev->type == HCDP_DEV_CONSOLE) { + addr = (u64) dev->base_addr.addrhi << 32 | dev->base_addr.addrlo; + break; + } + } + return addr; +} +#endif /* CONFIG_IA64_EARLY_PRINTK_UART */ diff -Nru a/drivers/serial/8250_hcdp.h b/drivers/serial/8250_hcdp.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/serial/8250_hcdp.h Wed Feb 12 14:47:58 2003 @@ -0,0 +1,79 @@ +/* + * drivers/serial/8250_hcdp.h + * + * Copyright (C) 2002 Hewlett-Packard Co. + * Khalid Aziz + * + * Definitions for HCDP defined serial ports (Serial console and debug + * ports) + */ + +/* ACPI table signatures */ +#define HCDP_SIG_LEN 4 +#define HCDP_SIGNATURE "HCDP" + +/* Space ID as defined in ACPI generic address structure */ +#define ACPI_MEM_SPACE 0 +#define ACPI_IO_SPACE 1 +#define ACPI_PCICONF_SPACE 2 + +/* + * Maximum number of HCDP devices we want to read in + */ +#define MAX_HCDP_DEVICES 6 + +/* + * Default UART clock rate if clock rate is 0 in HCDP table. + */ +#define DEFAULT_UARTCLK 115200 + +/* + * ACPI Generic Address Structure + */ +typedef struct { + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 resv; + u32 addrlo; + u32 addrhi; +} acpi_gen_addr; + +/* HCDP Device descriptor entry types */ +#define HCDP_DEV_CONSOLE 0 +#define HCDP_DEV_DEBUG 1 + +/* HCDP Device descriptor type */ +typedef struct { + u8 type; + u8 bits; + u8 parity; + u8 stop_bits; + u8 pci_seg; + u8 pci_bus; + u8 pci_dev; + u8 pci_func; + u64 baud; + acpi_gen_addr base_addr; + u16 pci_dev_id; + u16 pci_vendor_id; + u32 global_int; + u32 clock_rate; + u8 pci_prog_intfc; + u8 resv; +} hcdp_dev_t; + +/* HCDP Table format */ +typedef struct { + u8 signature[4]; + u32 len; + u8 rev; + u8 chksum; + u8 oemid[6]; + u8 oem_tabid[8]; + u32 oem_rev; + u8 creator_id[4]; + u32 creator_rev; + u32 num_entries; + hcdp_dev_t hcdp_dev[MAX_HCDP_DEVICES]; +} hcdp_t; diff -Nru a/drivers/serial/Kconfig b/drivers/serial/Kconfig --- a/drivers/serial/Kconfig Wed Feb 12 14:47:58 2003 +++ b/drivers/serial/Kconfig Wed Feb 12 14:47:58 2003 @@ -39,6 +39,13 @@ Most people will say Y or M here, so that they can use serial mice, modems and similar devices connecting to the standard serial ports. +config SERIAL_8250_ACPI + tristate "8250/16550 device discovery support via ACPI SPCR/DBGP tables" + depends on IA64 + help + Locate serial ports via the Microsoft proprietary ACPI SPCR/DBGP tables. + This table has been superseded by the EFI HCDP table. + config SERIAL_8250_CONSOLE bool "Console on 8250/16550 and compatible serial port (EXPERIMENTAL)" depends on SERIAL_8250=y @@ -76,6 +83,15 @@ The module will be called serial_cs. 
If you want to compile it as a module, say M here and read . If unsure, say N. + +config SERIAL_8250_HCDP + bool "8250/16550 device discovery support via EFI HCDP table" + depends on IA64 + ---help--- + If you wish to make the serial console port described by the EFI + HCDP table available for use as serial console or general + purpose port, say Y here. See + . config SERIAL_8250_EXTENDED bool "Extended 8250/16550 serial driver options" diff -Nru a/drivers/serial/Makefile b/drivers/serial/Makefile --- a/drivers/serial/Makefile Wed Feb 12 14:47:58 2003 +++ b/drivers/serial/Makefile Wed Feb 12 14:47:58 2003 @@ -8,6 +8,8 @@ serial-8250-$(CONFIG_GSC) += 8250_gsc.o serial-8250-$(CONFIG_PCI) += 8250_pci.o serial-8250-$(CONFIG_PNP) += 8250_pnp.o +serial-8250-$(CONFIG_SERIAL_8250_ACPI) += acpi.o 8250_acpi.o +serial-8250-$(CONFIG_SERIAL_8250_HCDP) += 8250_hcdp.o obj-$(CONFIG_SERIAL_CORE) += core.o obj-$(CONFIG_SERIAL_21285) += 21285.o obj-$(CONFIG_SERIAL_8250) += 8250.o $(serial-8250-y) diff -Nru a/drivers/serial/acpi.c b/drivers/serial/acpi.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/serial/acpi.c Wed Feb 12 14:47:58 2003 @@ -0,0 +1,110 @@ +/* + * serial/acpi.c + * Copyright (c) 2002-2003 Matthew Wilcox for Hewlett-Packard + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include + +#include + +#include +#include + +static void acpi_serial_address(struct serial_struct *req, struct acpi_resource_address32 *addr32) +{ + unsigned long size; + + size = addr32->max_address_range - addr32->min_address_range + 1; + req->iomap_base = addr32->min_address_range; + req->iomem_base = ioremap(req->iomap_base, size); + req->io_type = SERIAL_IO_MEM; +} + +static void acpi_serial_irq(struct serial_struct *req, struct acpi_resource_ext_irq *ext_irq) +{ + if (ext_irq->number_of_interrupts > 0) { +#ifdef CONFIG_IA64 + req->irq = acpi_register_irq(ext_irq->interrupts[0], + ext_irq->active_high_low == ACPI_ACTIVE_HIGH, + ext_irq->edge_level == ACPI_EDGE_SENSITIVE); +#else + req->irq = ext_irq->interrupts[0]; +#endif + } +} + +static int acpi_serial_add(struct acpi_device *device) +{ + acpi_status result; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct serial_struct serial_req; + int line, offset = 0; + + memset(&serial_req, 0, sizeof(serial_req)); + result = acpi_get_current_resources(device->handle, &buffer); + if (ACPI_FAILURE(result)) { + result = -ENODEV; + goto out; + } + + while (offset <= buffer.length) { + struct acpi_resource *res = buffer.pointer + offset; + if (res->length == 0) + break; + offset += res->length; + if (res->id == ACPI_RSTYPE_ADDRESS32) { + acpi_serial_address(&serial_req, &res->data.address32); + } else if (res->id == ACPI_RSTYPE_EXT_IRQ) { + acpi_serial_irq(&serial_req, &res->data.extended_irq); + } + } + + serial_req.baud_base = BASE_BAUD; + serial_req.flags = ASYNC_SKIP_TEST|ASYNC_BOOT_AUTOCONF|ASYNC_AUTO_IRQ; + + result = 0; + line = register_serial(&serial_req); + if (line < 0) + result = -ENODEV; + + out: + acpi_os_free(buffer.pointer); + return result; +} + +static int acpi_serial_remove(struct acpi_device *device, int type) +{ + return 0; +} + +static struct acpi_driver acpi_serial_driver = { + .name = "serial", + .class = "", + .ids = "PNP0501", + .ops = { + .add = acpi_serial_add, + .remove = 
acpi_serial_remove, + }, +}; + +static int __init acpi_serial_init(void) +{ + acpi_bus_register_driver(&acpi_serial_driver); + return 0; +} + +static void __exit acpi_serial_exit(void) +{ + acpi_bus_unregister_driver(&acpi_serial_driver); +} + +module_init(acpi_serial_init); +module_exit(acpi_serial_exit); diff -Nru a/drivers/video/radeonfb.c b/drivers/video/radeonfb.c --- a/drivers/video/radeonfb.c Wed Feb 12 14:47:58 2003 +++ b/drivers/video/radeonfb.c Wed Feb 12 14:47:58 2003 @@ -724,7 +724,6 @@ radeon_set_backlight_level }; #endif /* CONFIG_PMAC_BACKLIGHT */ - #endif /* CONFIG_ALL_PPC */ diff -Nru a/fs/buffer.c b/fs/buffer.c --- a/fs/buffer.c Wed Feb 12 14:47:58 2003 +++ b/fs/buffer.c Wed Feb 12 14:47:58 2003 @@ -2619,6 +2619,24 @@ } /* + * For a data-integrity writeout, we need to wait upon any in-progress I/O + * and then start new I/O and then wait upon it. + */ +void sync_dirty_buffer(struct buffer_head *bh) +{ + WARN_ON(atomic_read(&bh->b_count) < 1); + lock_buffer(bh); + if (test_clear_buffer_dirty(bh)) { + get_bh(bh); + bh->b_end_io = end_buffer_io_sync; + submit_bh(WRITE, bh); + wait_on_buffer(bh); + } else { + unlock_buffer(bh); + } +} + +/* * Sanity checks for try_to_free_buffers. */ static void check_ttfb_buffer(struct page *page, struct buffer_head *bh) diff -Nru a/fs/exec.c b/fs/exec.c --- a/fs/exec.c Wed Feb 12 14:47:57 2003 +++ b/fs/exec.c Wed Feb 12 14:47:57 2003 @@ -408,7 +408,7 @@ mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p; mpnt->vm_end = STACK_TOP; #endif - mpnt->vm_page_prot = PAGE_COPY; + mpnt->vm_page_prot = protection_map[VM_STACK_FLAGS & 0x7]; mpnt->vm_flags = VM_STACK_FLAGS; mpnt->vm_ops = NULL; mpnt->vm_pgoff = 0; diff -Nru a/fs/ext2/balloc.c b/fs/ext2/balloc.c --- a/fs/ext2/balloc.c Wed Feb 12 14:47:57 2003 +++ b/fs/ext2/balloc.c Wed Feb 12 14:47:57 2003 @@ -233,10 +233,8 @@ } mark_buffer_dirty(bitmap_bh); - if (sb->s_flags & MS_SYNCHRONOUS) { - ll_rw_block(WRITE, 1, &bitmap_bh); - wait_on_buffer(bitmap_bh); - } + if (sb->s_flags & MS_SYNCHRONOUS) + sync_dirty_buffer(bitmap_bh); group_release_blocks(desc, bh2, group_freed); freed += group_freed; @@ -466,10 +464,8 @@ write_unlock(&EXT2_I(inode)->i_meta_lock); mark_buffer_dirty(bitmap_bh); - if (sb->s_flags & MS_SYNCHRONOUS) { - ll_rw_block(WRITE, 1, &bitmap_bh); - wait_on_buffer(bitmap_bh); - } + if (sb->s_flags & MS_SYNCHRONOUS) + sync_dirty_buffer(bitmap_bh); ext2_debug ("allocating block %d. ", block); diff -Nru a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c --- a/fs/ext2/ialloc.c Wed Feb 12 14:47:58 2003 +++ b/fs/ext2/ialloc.c Wed Feb 12 14:47:58 2003 @@ -146,10 +146,8 @@ mark_buffer_dirty(EXT2_SB(sb)->s_sbh); } mark_buffer_dirty(bitmap_bh); - if (sb->s_flags & MS_SYNCHRONOUS) { - ll_rw_block(WRITE, 1, &bitmap_bh); - wait_on_buffer(bitmap_bh); - } + if (sb->s_flags & MS_SYNCHRONOUS) + sync_dirty_buffer(bitmap_bh); sb->s_dirt = 1; error_return: brelse(bitmap_bh); @@ -485,10 +483,8 @@ ext2_set_bit(i, bitmap_bh->b_data); mark_buffer_dirty(bitmap_bh); - if (sb->s_flags & MS_SYNCHRONOUS) { - ll_rw_block(WRITE, 1, &bitmap_bh); - wait_on_buffer(bitmap_bh); - } + if (sb->s_flags & MS_SYNCHRONOUS) + sync_dirty_buffer(bitmap_bh); brelse(bitmap_bh); ino = group * EXT2_INODES_PER_GROUP(sb) + i + 1; diff -Nru a/fs/ext2/inode.c b/fs/ext2/inode.c --- a/fs/ext2/inode.c Wed Feb 12 14:47:57 2003 +++ b/fs/ext2/inode.c Wed Feb 12 14:47:57 2003 @@ -443,10 +443,8 @@ * But we now rely upon generic_osync_inode() * and b_inode_buffers. But not for directories. 
*/ - if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode)) { - ll_rw_block(WRITE, 1, &bh); - wait_on_buffer(bh); - } + if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode)) + sync_dirty_buffer(bh); parent = nr; } if (n == num) @@ -1208,8 +1206,7 @@ raw_inode->i_block[n] = ei->i_data[n]; mark_buffer_dirty(bh); if (do_sync) { - ll_rw_block (WRITE, 1, &bh); - wait_on_buffer (bh); + sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) { printk ("IO error syncing ext2 inode [%s:%08lx]\n", sb->s_id, (unsigned long) ino); diff -Nru a/fs/ext2/super.c b/fs/ext2/super.c --- a/fs/ext2/super.c Wed Feb 12 14:47:58 2003 +++ b/fs/ext2/super.c Wed Feb 12 14:47:58 2003 @@ -842,8 +842,7 @@ { es->s_wtime = cpu_to_le32(get_seconds()); mark_buffer_dirty(EXT2_SB(sb)->s_sbh); - ll_rw_block(WRITE, 1, &EXT2_SB(sb)->s_sbh); - wait_on_buffer(EXT2_SB(sb)->s_sbh); + sync_dirty_buffer(EXT2_SB(sb)->s_sbh); sb->s_dirt = 0; } diff -Nru a/fs/ext2/xattr.c b/fs/ext2/xattr.c --- a/fs/ext2/xattr.c Wed Feb 12 14:47:58 2003 +++ b/fs/ext2/xattr.c Wed Feb 12 14:47:58 2003 @@ -774,8 +774,7 @@ } mark_buffer_dirty(new_bh); if (IS_SYNC(inode)) { - ll_rw_block(WRITE, 1, &new_bh); - wait_on_buffer(new_bh); + sync_dirty_buffer(new_bh); error = -EIO; if (buffer_req(new_bh) && !buffer_uptodate(new_bh)) goto cleanup; @@ -865,10 +864,8 @@ HDR(bh)->h_refcount = cpu_to_le32( le32_to_cpu(HDR(bh)->h_refcount) - 1); mark_buffer_dirty(bh); - if (IS_SYNC(inode)) { - ll_rw_block(WRITE, 1, &bh); - wait_on_buffer(bh); - } + if (IS_SYNC(inode)) + sync_dirty_buffer(bh); DQUOT_FREE_BLOCK(inode, 1); } EXT2_I(inode)->i_file_acl = 0; diff -Nru a/fs/ext3/super.c b/fs/ext3/super.c --- a/fs/ext3/super.c Wed Feb 12 14:47:58 2003 +++ b/fs/ext3/super.c Wed Feb 12 14:47:58 2003 @@ -1627,10 +1627,8 @@ es->s_wtime = cpu_to_le32(get_seconds()); BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "marking dirty"); mark_buffer_dirty(EXT3_SB(sb)->s_sbh); - if (sync) { - ll_rw_block(WRITE, 1, &EXT3_SB(sb)->s_sbh); - wait_on_buffer(EXT3_SB(sb)->s_sbh); - } + if (sync) + sync_dirty_buffer(EXT3_SB(sb)->s_sbh); } diff -Nru a/fs/fcntl.c b/fs/fcntl.c --- a/fs/fcntl.c Wed Feb 12 14:47:57 2003 +++ b/fs/fcntl.c Wed Feb 12 14:47:57 2003 @@ -320,6 +320,7 @@ * to fix this will be in libc. 
*/ err = filp->f_owner.pid; + force_successful_syscall_return(); break; case F_SETOWN: err = f_setown(filp, arg, 1); diff -Nru a/fs/jbd/commit.c b/fs/jbd/commit.c --- a/fs/jbd/commit.c Wed Feb 12 14:47:58 2003 +++ b/fs/jbd/commit.c Wed Feb 12 14:47:58 2003 @@ -562,8 +562,7 @@ { struct buffer_head *bh = jh2bh(descriptor); set_buffer_uptodate(bh); - ll_rw_block(WRITE, 1, &bh); - wait_on_buffer(bh); + sync_dirty_buffer(bh); __brelse(bh); /* One for getblk() */ journal_unlock_journal_head(descriptor); } diff -Nru a/fs/jbd/journal.c b/fs/jbd/journal.c --- a/fs/jbd/journal.c Wed Feb 12 14:47:57 2003 +++ b/fs/jbd/journal.c Wed Feb 12 14:47:57 2003 @@ -960,9 +960,10 @@ BUFFER_TRACE(bh, "marking dirty"); mark_buffer_dirty(bh); - ll_rw_block(WRITE, 1, &bh); if (wait) - wait_on_buffer(bh); + sync_dirty_buffer(bh); + else + ll_rw_block(WRITE, 1, &bh); /* If we have just flushed the log (by marking s_start==0), then * any future commit will have to be careful to update the @@ -1296,8 +1297,7 @@ bh = journal->j_sb_buffer; BUFFER_TRACE(bh, "marking dirty"); mark_buffer_dirty(bh); - ll_rw_block(WRITE, 1, &bh); - wait_on_buffer(bh); + sync_dirty_buffer(bh); return 0; } diff -Nru a/fs/jbd/transaction.c b/fs/jbd/transaction.c --- a/fs/jbd/transaction.c Wed Feb 12 14:47:57 2003 +++ b/fs/jbd/transaction.c Wed Feb 12 14:47:57 2003 @@ -1079,8 +1079,7 @@ atomic_inc(&bh->b_count); spin_unlock(&journal_datalist_lock); need_brelse = 1; - ll_rw_block(WRITE, 1, &bh); - wait_on_buffer(bh); + sync_dirty_buffer(bh); spin_lock(&journal_datalist_lock); /* The buffer may become locked again at any time if it is redirtied */ @@ -1361,8 +1360,7 @@ } atomic_inc(&bh->b_count); spin_unlock(&journal_datalist_lock); - ll_rw_block (WRITE, 1, &bh); - wait_on_buffer(bh); + sync_dirty_buffer(bh); __brelse(bh); goto out; } diff -Nru a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c --- a/fs/jfs/jfs_imap.c Wed Feb 12 14:47:57 2003 +++ b/fs/jfs/jfs_imap.c Wed Feb 12 14:47:57 2003 @@ -2980,8 +2980,7 @@ j_sb->s_flag |= JFS_BAD_SAIT; mark_buffer_dirty(bh); - ll_rw_block(WRITE, 1, &bh); - wait_on_buffer(bh); + sync_dirty_buffer(bh); brelse(bh); return; } diff -Nru a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c --- a/fs/jfs/jfs_mount.c Wed Feb 12 14:47:57 2003 +++ b/fs/jfs/jfs_mount.c Wed Feb 12 14:47:57 2003 @@ -449,8 +449,7 @@ } mark_buffer_dirty(bh); - ll_rw_block(WRITE, 1, &bh); - wait_on_buffer(bh); + sync_dirty_buffer(bh); brelse(bh); return 0; diff -Nru a/fs/jfs/namei.c b/fs/jfs/namei.c --- a/fs/jfs/namei.c Wed Feb 12 14:47:58 2003 +++ b/fs/jfs/namei.c Wed Feb 12 14:47:58 2003 @@ -972,10 +972,8 @@ #if 0 set_buffer_uptodate(bp); mark_buffer_dirty(bp, 1); - if (IS_SYNC(dip)) { - ll_rw_block(WRITE, 1, &bp); - wait_on_buffer(bp); - } + if (IS_SYNC(dip)) + sync_dirty_buffer(bp); brelse(bp); #endif /* 0 */ ssize -= copy_size; diff -Nru a/fs/jfs/resize.c b/fs/jfs/resize.c --- a/fs/jfs/resize.c Wed Feb 12 14:47:58 2003 +++ b/fs/jfs/resize.c Wed Feb 12 14:47:58 2003 @@ -243,8 +243,7 @@ /* synchronously update superblock */ mark_buffer_dirty(bh); - ll_rw_block(WRITE, 1, &bh); - wait_on_buffer(bh); + sync_dirty_buffer(bh); brelse(bh); /* @@ -512,15 +511,13 @@ memcpy(j_sb2, j_sb, sizeof (struct jfs_superblock)); mark_buffer_dirty(bh); - ll_rw_block(WRITE, 1, &bh2); - wait_on_buffer(bh2); + sync_dirty_buffer(bh2); brelse(bh2); } /* write primary superblock */ mark_buffer_dirty(bh); - ll_rw_block(WRITE, 1, &bh); - wait_on_buffer(bh); + sync_dirty_buffer(bh); brelse(bh); goto resume; diff -Nru a/fs/minix/inode.c b/fs/minix/inode.c --- a/fs/minix/inode.c Wed Feb 12 
14:47:58 2003 +++ b/fs/minix/inode.c Wed Feb 12 14:47:58 2003 @@ -517,8 +517,7 @@ bh = minix_update_inode(inode); if (bh && buffer_dirty(bh)) { - ll_rw_block(WRITE, 1, &bh); - wait_on_buffer(bh); + sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) { printk ("IO error syncing minix inode [%s:%08lx]\n", diff -Nru a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c --- a/fs/ncpfs/sock.c Wed Feb 12 14:47:57 2003 +++ b/fs/ncpfs/sock.c Wed Feb 12 14:47:57 2003 @@ -757,9 +757,9 @@ What if we've blocked it ourselves? What about alarms? Why, in fact, are we mucking with the sigmask at all? -- r~ */ - if (current->sig->action[SIGINT - 1].sa.sa_handler == SIG_DFL) + if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL) mask |= sigmask(SIGINT); - if (current->sig->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL) + if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL) mask |= sigmask(SIGQUIT); } siginitsetinv(¤t->blocked, mask); diff -Nru a/fs/ntfs/super.c b/fs/ntfs/super.c --- a/fs/ntfs/super.c Wed Feb 12 14:47:58 2003 +++ b/fs/ntfs/super.c Wed Feb 12 14:47:58 2003 @@ -505,8 +505,7 @@ memcpy(bh_primary->b_data, bh_backup->b_data, sb->s_blocksize); mark_buffer_dirty(bh_primary); - ll_rw_block(WRITE, 1, &bh_primary); - wait_on_buffer(bh_primary); + sync_dirty_buffer(bh_primary); if (buffer_uptodate(bh_primary)) { brelse(bh_backup); return bh_primary; diff -Nru a/fs/proc/base.c b/fs/proc/base.c --- a/fs/proc/base.c Wed Feb 12 14:47:58 2003 +++ b/fs/proc/base.c Wed Feb 12 14:47:58 2003 @@ -533,7 +533,24 @@ } #endif +static loff_t mem_lseek(struct file * file, loff_t offset, int orig) +{ + switch (orig) { + case 0: + file->f_pos = offset; + break; + case 1: + file->f_pos += offset; + break; + default: + return -EINVAL; + } + force_successful_syscall_return(); + return file->f_pos; +} + static struct file_operations proc_mem_operations = { + .llseek = mem_lseek, .read = mem_read, .write = mem_write, .open = mem_open, diff -Nru a/fs/qnx4/inode.c b/fs/qnx4/inode.c --- a/fs/qnx4/inode.c Wed Feb 12 14:47:58 2003 +++ b/fs/qnx4/inode.c Wed Feb 12 14:47:58 2003 @@ -44,8 +44,7 @@ bh = qnx4_update_inode(inode); if (bh && buffer_dirty(bh)) { - ll_rw_block(WRITE, 1, &bh); - wait_on_buffer(bh); + sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) { printk ("IO error syncing qnx4 inode [%s:%08lx]\n", diff -Nru a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c --- a/fs/reiserfs/journal.c Wed Feb 12 14:47:58 2003 +++ b/fs/reiserfs/journal.c Wed Feb 12 14:47:58 2003 @@ -735,8 +735,7 @@ } mark_buffer_dirty(jl->j_commit_bh) ; - ll_rw_block(WRITE, 1, &(jl->j_commit_bh)) ; - wait_on_buffer(jl->j_commit_bh) ; + sync_dirty_buffer(jl->j_commit_bh) ; if (!buffer_uptodate(jl->j_commit_bh)) { reiserfs_panic(s, "journal-615: buffer write failed\n") ; } @@ -828,8 +827,7 @@ jh->j_first_unflushed_offset = cpu_to_le32(offset) ; jh->j_mount_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_mount_id) ; set_buffer_dirty(SB_JOURNAL(p_s_sb)->j_header_bh) ; - ll_rw_block(WRITE, 1, &(SB_JOURNAL(p_s_sb)->j_header_bh)) ; - wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh)) ; + sync_dirty_buffer(SB_JOURNAL(p_s_sb)->j_header_bh) ; if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) { printk( "reiserfs: journal-837: IO error during journal replay\n" ); return -EIO ; diff -Nru a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c --- a/fs/reiserfs/resize.c Wed Feb 12 14:47:57 2003 +++ b/fs/reiserfs/resize.c Wed Feb 12 14:47:57 2003 @@ -120,8 +120,7 @@ mark_buffer_dirty(bitmap[i].bh) ; set_buffer_uptodate(bitmap[i].bh); - 
ll_rw_block(WRITE, 1, &bitmap[i].bh); - wait_on_buffer(bitmap[i].bh); + sync_dirty_buffer(bitmap[i].bh); // update bitmap_info stuff bitmap[i].first_zero_hint=1; bitmap[i].free_count = sb_blocksize(sb) * 8 - 1; diff -Nru a/fs/select.c b/fs/select.c --- a/fs/select.c Wed Feb 12 14:47:57 2003 +++ b/fs/select.c Wed Feb 12 14:47:57 2003 @@ -176,7 +176,7 @@ { struct poll_wqueues table; poll_table *wait; - int retval, i, off; + int retval, i; long __timeout = *timeout; read_lock(¤t->files->file_lock); @@ -193,38 +193,53 @@ wait = NULL; retval = 0; for (;;) { + unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp; set_current_state(TASK_INTERRUPTIBLE); - for (i = 0 ; i < n; i++) { - unsigned long bit = BIT(i); - unsigned long mask; - struct file *file; - off = i / __NFDBITS; - if (!(bit & BITS(fds, off))) + inp = fds->in; outp = fds->out; exp = fds->ex; + rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex; + + for (i = 0; i < n; ++rinp, ++routp, ++rexp) { + unsigned long in, out, ex, all_bits, bit = 1, mask, j; + unsigned long res_in = 0, res_out = 0, res_ex = 0; + struct file_operations *f_op = NULL; + struct file *file = NULL; + + in = *inp++; out = *outp++; ex = *exp++; + all_bits = in | out | ex; + if (all_bits == 0) continue; - file = fget(i); - mask = POLLNVAL; - if (file) { + + for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) { + if (i >= n) + break; + if (!(bit & all_bits)) + continue; + file = fget(i); + if (file) + f_op = file->f_op; mask = DEFAULT_POLLMASK; - if (file->f_op && file->f_op->poll) - mask = file->f_op->poll(file, wait); - fput(file); - } - if ((mask & POLLIN_SET) && ISSET(bit, __IN(fds,off))) { - SET(bit, __RES_IN(fds,off)); - retval++; - wait = NULL; - } - if ((mask & POLLOUT_SET) && ISSET(bit, __OUT(fds,off))) { - SET(bit, __RES_OUT(fds,off)); - retval++; - wait = NULL; - } - if ((mask & POLLEX_SET) && ISSET(bit, __EX(fds,off))) { - SET(bit, __RES_EX(fds,off)); - retval++; - wait = NULL; + if (file) { + if (f_op && f_op->poll) + mask = (*f_op->poll)(file, retval ? 
NULL : wait); + fput(file); + if ((mask & POLLIN_SET) && (in & bit)) { + res_in |= bit; + retval++; + } + if ((mask & POLLOUT_SET) && (out & bit)) { + res_out |= bit; + retval++; + } + if ((mask & POLLEX_SET) && (ex & bit)) { + res_ex |= bit; + retval++; + } + } } + if (res_in) *rinp = res_in; + if (res_out) *routp = res_out; + if (res_ex) *rexp = res_ex; } wait = NULL; if (retval || !__timeout || signal_pending(current)) diff -Nru a/fs/sysv/inode.c b/fs/sysv/inode.c --- a/fs/sysv/inode.c Wed Feb 12 14:47:57 2003 +++ b/fs/sysv/inode.c Wed Feb 12 14:47:57 2003 @@ -265,8 +265,7 @@ bh = sysv_update_inode(inode); if (bh && buffer_dirty(bh)) { - ll_rw_block(WRITE, 1, &bh); - wait_on_buffer(bh); + sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) { printk ("IO error syncing sysv inode [%s:%08lx]\n", inode->i_sb->s_id, inode->i_ino); diff -Nru a/fs/sysv/itree.c b/fs/sysv/itree.c --- a/fs/sysv/itree.c Wed Feb 12 14:47:57 2003 +++ b/fs/sysv/itree.c Wed Feb 12 14:47:57 2003 @@ -15,10 +15,8 @@ static inline void dirty_indirect(struct buffer_head *bh, struct inode *inode) { mark_buffer_dirty_inode(bh, inode); - if (IS_SYNC(inode)) { - ll_rw_block (WRITE, 1, &bh); - wait_on_buffer (bh); - } + if (IS_SYNC(inode)) + sync_dirty_buffer(bh); } static int block_to_path(struct inode *inode, long block, int offsets[DEPTH]) diff -Nru a/fs/udf/inode.c b/fs/udf/inode.c --- a/fs/udf/inode.c Wed Feb 12 14:47:58 2003 +++ b/fs/udf/inode.c Wed Feb 12 14:47:58 2003 @@ -1520,8 +1520,7 @@ mark_buffer_dirty(bh); if (do_sync) { - ll_rw_block(WRITE, 1, &bh); - wait_on_buffer(bh); + sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) { printk("IO error syncing udf inode [%s:%08lx]\n", diff -Nru a/fs/ufs/balloc.c b/fs/ufs/balloc.c --- a/fs/ufs/balloc.c Wed Feb 12 14:47:58 2003 +++ b/fs/ufs/balloc.c Wed Feb 12 14:47:58 2003 @@ -114,6 +114,7 @@ ubh_mark_buffer_dirty (USPI_UBH); ubh_mark_buffer_dirty (UCPI_UBH); if (sb->s_flags & MS_SYNCHRONOUS) { + ubh_wait_on_buffer (UCPI_UBH); ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi); ubh_wait_on_buffer (UCPI_UBH); } @@ -199,6 +200,7 @@ ubh_mark_buffer_dirty (USPI_UBH); ubh_mark_buffer_dirty (UCPI_UBH); if (sb->s_flags & MS_SYNCHRONOUS) { + ubh_wait_on_buffer (UCPI_UBH); ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi); ubh_wait_on_buffer (UCPI_UBH); } @@ -228,10 +230,8 @@ memset (bh->b_data, 0, sb->s_blocksize); \ set_buffer_uptodate(bh); \ mark_buffer_dirty (bh); \ - if (IS_SYNC(inode)) { \ - ll_rw_block (WRITE, 1, &bh); \ - wait_on_buffer (bh); \ - } \ + if (IS_SYNC(inode)) \ + sync_dirty_buffer(bh); \ brelse (bh); \ } @@ -364,10 +364,8 @@ clear_buffer_dirty(bh); bh->b_blocknr = result + i; mark_buffer_dirty (bh); - if (IS_SYNC(inode)) { - ll_rw_block (WRITE, 1, &bh); - wait_on_buffer (bh); - } + if (IS_SYNC(inode)) + sync_dirty_buffer(bh); brelse (bh); } else @@ -459,6 +457,7 @@ ubh_mark_buffer_dirty (USPI_UBH); ubh_mark_buffer_dirty (UCPI_UBH); if (sb->s_flags & MS_SYNCHRONOUS) { + ubh_wait_on_buffer (UCPI_UBH); ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi); ubh_wait_on_buffer (UCPI_UBH); } @@ -584,6 +583,7 @@ ubh_mark_buffer_dirty (USPI_UBH); ubh_mark_buffer_dirty (UCPI_UBH); if (sb->s_flags & MS_SYNCHRONOUS) { + ubh_wait_on_buffer (UCPI_UBH); ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi); ubh_wait_on_buffer (UCPI_UBH); } diff -Nru a/fs/ufs/dir.c b/fs/ufs/dir.c --- a/fs/ufs/dir.c Wed Feb 12 14:47:58 2003 +++ b/fs/ufs/dir.c Wed Feb 12 14:47:58 2003 @@ -356,10 +356,8 @@ dir->i_version++; 
de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino); mark_buffer_dirty(bh); - if (IS_DIRSYNC(dir)) { - ll_rw_block (WRITE, 1, &bh); - wait_on_buffer(bh); - } + if (IS_DIRSYNC(dir)) + sync_dirty_buffer(bh); brelse (bh); } @@ -457,10 +455,8 @@ de->d_ino = cpu_to_fs32(sb, inode->i_ino); ufs_set_de_type(sb, de, inode->i_mode); mark_buffer_dirty(bh); - if (IS_DIRSYNC(dir)) { - ll_rw_block (WRITE, 1, &bh); - wait_on_buffer (bh); - } + if (IS_DIRSYNC(dir)) + sync_dirty_buffer(bh); brelse (bh); dir->i_mtime = dir->i_ctime = CURRENT_TIME; dir->i_version++; @@ -508,10 +504,8 @@ inode->i_ctime = inode->i_mtime = CURRENT_TIME; mark_inode_dirty(inode); mark_buffer_dirty(bh); - if (IS_DIRSYNC(inode)) { - ll_rw_block(WRITE, 1, &bh); - wait_on_buffer(bh); - } + if (IS_DIRSYNC(inode)) + sync_dirty_buffer(bh); brelse(bh); UFSD(("EXIT\n")) return 0; diff -Nru a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c --- a/fs/ufs/ialloc.c Wed Feb 12 14:47:58 2003 +++ b/fs/ufs/ialloc.c Wed Feb 12 14:47:58 2003 @@ -124,6 +124,7 @@ ubh_mark_buffer_dirty (USPI_UBH); ubh_mark_buffer_dirty (UCPI_UBH); if (sb->s_flags & MS_SYNCHRONOUS) { + ubh_wait_on_buffer (UCPI_UBH); ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi); ubh_wait_on_buffer (UCPI_UBH); } @@ -248,6 +249,7 @@ ubh_mark_buffer_dirty (USPI_UBH); ubh_mark_buffer_dirty (UCPI_UBH); if (sb->s_flags & MS_SYNCHRONOUS) { + ubh_wait_on_buffer (UCPI_UBH); ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi); ubh_wait_on_buffer (UCPI_UBH); } diff -Nru a/fs/ufs/inode.c b/fs/ufs/inode.c --- a/fs/ufs/inode.c Wed Feb 12 14:47:57 2003 +++ b/fs/ufs/inode.c Wed Feb 12 14:47:57 2003 @@ -298,10 +298,8 @@ } mark_buffer_dirty(bh); - if (IS_SYNC(inode)) { - ll_rw_block (WRITE, 1, &bh); - wait_on_buffer (bh); - } + if (IS_SYNC(inode)) + sync_dirty_buffer(bh); inode->i_ctime = CURRENT_TIME; mark_inode_dirty(inode); out: @@ -635,10 +633,8 @@ memset (ufs_inode, 0, sizeof(struct ufs_inode)); mark_buffer_dirty(bh); - if (do_sync) { - ll_rw_block (WRITE, 1, &bh); - wait_on_buffer (bh); - } + if (do_sync) + sync_dirty_buffer(bh); brelse (bh); UFSD(("EXIT\n")) diff -Nru a/fs/ufs/truncate.c b/fs/ufs/truncate.c --- a/fs/ufs/truncate.c Wed Feb 12 14:47:58 2003 +++ b/fs/ufs/truncate.c Wed Feb 12 14:47:58 2003 @@ -284,6 +284,7 @@ } } if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) { + ubh_wait_on_buffer (ind_ubh); ubh_ll_rw_block (WRITE, 1, &ind_ubh); ubh_wait_on_buffer (ind_ubh); } @@ -351,6 +352,7 @@ } } if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) { + ubh_wait_on_buffer (dind_bh); ubh_ll_rw_block (WRITE, 1, &dind_bh); ubh_wait_on_buffer (dind_bh); } @@ -415,6 +417,7 @@ } } if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) { + ubh_wait_on_buffer (tind_bh); ubh_ll_rw_block (WRITE, 1, &tind_bh); ubh_wait_on_buffer (tind_bh); } diff -Nru a/include/asm-alpha/agp.h b/include/asm-alpha/agp.h --- a/include/asm-alpha/agp.h Wed Feb 12 14:47:57 2003 +++ b/include/asm-alpha/agp.h Wed Feb 12 14:47:57 2003 @@ -10,4 +10,11 @@ #define flush_agp_mappings() #define flush_agp_cache() mb() +/* + * Page-protection value to be used for AGP memory mapped into kernel space. For + * platforms which use coherent AGP DMA, this can be PAGE_KERNEL. For others, it needs to + * be an uncached mapping (such as write-combining). 
+ */ +#define PAGE_AGP PAGE_KERNEL_NOCACHE /* XXX fix me */ + #endif diff -Nru a/include/asm-i386/agp.h b/include/asm-i386/agp.h --- a/include/asm-i386/agp.h Wed Feb 12 14:47:57 2003 +++ b/include/asm-i386/agp.h Wed Feb 12 14:47:57 2003 @@ -20,4 +20,11 @@ worth it. Would need a page for it. */ #define flush_agp_cache() asm volatile("wbinvd":::"memory") +/* + * Page-protection value to be used for AGP memory mapped into kernel space. For + * platforms which use coherent AGP DMA, this can be PAGE_KERNEL. For others, it needs to + * be an uncached mapping (such as write-combining). + */ +#define PAGE_AGP PAGE_KERNEL_NOCACHE + #endif diff -Nru a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h --- a/include/asm-i386/hw_irq.h Wed Feb 12 14:47:58 2003 +++ b/include/asm-i386/hw_irq.h Wed Feb 12 14:47:58 2003 @@ -140,4 +140,6 @@ static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {} #endif +extern irq_desc_t irq_desc [NR_IRQS]; + #endif /* _ASM_HW_IRQ_H */ diff -Nru a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h --- a/include/asm-i386/ptrace.h Wed Feb 12 14:47:57 2003 +++ b/include/asm-i386/ptrace.h Wed Feb 12 14:47:57 2003 @@ -57,6 +57,7 @@ #ifdef __KERNEL__ #define user_mode(regs) ((VM_MASK & (regs)->eflags) || (3 & (regs)->xcs)) #define instruction_pointer(regs) ((regs)->eip) +#define force_successful_syscall_return() do { } while (0) #endif #endif diff -Nru a/include/asm-ia64/page.h b/include/asm-ia64/page.h --- a/include/asm-ia64/page.h Wed Feb 12 14:47:57 2003 +++ b/include/asm-ia64/page.h Wed Feb 12 14:47:57 2003 @@ -88,7 +88,12 @@ #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #ifndef CONFIG_DISCONTIGMEM -#define pfn_valid(pfn) ((pfn) < max_mapnr) +# ifdef CONFIG_VIRTUAL_MEM_MAP + extern int ia64_pfn_valid (unsigned long pfn); +# define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn)) +# else +# define pfn_valid(pfn) ((pfn) < max_mapnr) +# endif #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define page_to_pfn(page) ((unsigned long) (page - mem_map)) #define pfn_to_page(pfn) (mem_map + (pfn)) diff -Nru a/include/asm-ia64/perfmon.h b/include/asm-ia64/perfmon.h --- a/include/asm-ia64/perfmon.h Wed Feb 12 14:47:58 2003 +++ b/include/asm-ia64/perfmon.h Wed Feb 12 14:47:58 2003 @@ -1,5 +1,5 @@ /* - * Copyright (C) 2001-2002 Hewlett-Packard Co + * Copyright (C) 2001-2003 Hewlett-Packard Co * Stephane Eranian */ diff -Nru a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h --- a/include/asm-ia64/pgtable.h Wed Feb 12 14:47:57 2003 +++ b/include/asm-ia64/pgtable.h Wed Feb 12 14:47:57 2003 @@ -204,7 +204,13 @@ #define VMALLOC_START (0xa000000000000000 + 3*PERCPU_PAGE_SIZE) #define VMALLOC_VMADDR(x) ((unsigned long)(x)) -#define VMALLOC_END (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9))) +#ifdef CONFIG_VIRTUAL_MEM_MAP +# define VMALLOC_END_INIT (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9))) +# define VMALLOC_END vmalloc_end + extern unsigned long vmalloc_end; +#else +# define VMALLOC_END (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9))) +#endif /* * Conversion functions: convert page frame number (pfn) and a protection value to a page @@ -422,6 +428,18 @@ typedef pte_t *pte_addr_t; +# ifdef CONFIG_VIRTUAL_MEM_MAP + + /* arch mem_map init routine is needed due to holes in a virtual mem_map */ +# define HAVE_ARCH_MEMMAP_INIT + + typedef void memmap_init_callback_t (struct page *start, unsigned long size, + int nid, unsigned long zone, unsigned long start_pfn); + + extern void arch_memmap_init 
(memmap_init_callback_t *callback, struct page *start, + unsigned long size, int nid, unsigned long zone, + unsigned long start_pfn); +# endif /* CONFIG_VIRTUAL_MEM_MAP */ # endif /* !__ASSEMBLY__ */ /* diff -Nru a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h --- a/include/asm-ia64/processor.h Wed Feb 12 14:47:57 2003 +++ b/include/asm-ia64/processor.h Wed Feb 12 14:47:57 2003 @@ -68,11 +68,11 @@ /* - * This shift should be large enough to be able to represent - * 1000000/itc_freq with good accuracy while being small enough to fit - * 1000000<. */ #define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */ -#define PTRACE_GETSIGINFO 13 /* get child's siginfo structure */ -#define PTRACE_SETSIGINFO 14 /* set child's siginfo structure */ +#define PTRACE_OLD_GETSIGINFO 13 /* (replaced by PTRACE_GETSIGINFO in ) */ +#define PTRACE_OLD_SETSIGINFO 14 /* (replaced by PTRACE_SETSIGINFO in ) */ #define PTRACE_GETREGS 18 /* get all registers (pt_all_user_regs) in one shot */ #define PTRACE_SETREGS 19 /* set all registers (pt_all_user_regs) in one shot */ diff -Nru a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h --- a/include/asm-ia64/sal.h Wed Feb 12 14:47:58 2003 +++ b/include/asm-ia64/sal.h Wed Feb 12 14:47:58 2003 @@ -10,8 +10,8 @@ * Copyright (C) 2001 Intel * Copyright (C) 2002 Jenna Hall * Copyright (C) 2001 Fred Lewis - * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co - * Copyright (C) 1998, 1999, 2001 David Mosberger-Tang + * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co + * David Mosberger-Tang * Copyright (C) 1999 Srinivasa Prasad Thirumalachar * * 02/01/04 J. Hall Updated Error Record Structures to conform to July 2001 @@ -19,7 +19,7 @@ * 01/01/03 fvlewis Updated Error Record Structures to conform with Nov. 2000 * revision of the SAL spec. * 99/09/29 davidm Updated for SAL 2.6. 
- * 00/03/29 cfleck Updated SAL Error Logging info for processor (SAL 2.6) + * 00/03/29 cfleck Updated SAL Error Logging info for processor (SAL 2.6) * (plus examples of platform error info structures from smariset @ Intel) */ @@ -208,30 +208,30 @@ /* SAL information type encodings */ enum { - SAL_INFO_TYPE_MCA = 0, /* Machine check abort information */ - SAL_INFO_TYPE_INIT = 1, /* Init information */ - SAL_INFO_TYPE_CMC = 2, /* Corrected machine check information */ - SAL_INFO_TYPE_CPE = 3 /* Corrected platform error information */ + SAL_INFO_TYPE_MCA = 0, /* Machine check abort information */ + SAL_INFO_TYPE_INIT = 1, /* Init information */ + SAL_INFO_TYPE_CMC = 2, /* Corrected machine check information */ + SAL_INFO_TYPE_CPE = 3 /* Corrected platform error information */ }; /* Encodings for machine check parameter types */ enum { - SAL_MC_PARAM_RENDEZ_INT = 1, /* Rendezevous interrupt */ - SAL_MC_PARAM_RENDEZ_WAKEUP = 2, /* Wakeup */ - SAL_MC_PARAM_CPE_INT = 3 /* Corrected Platform Error Int */ + SAL_MC_PARAM_RENDEZ_INT = 1, /* Rendezevous interrupt */ + SAL_MC_PARAM_RENDEZ_WAKEUP = 2, /* Wakeup */ + SAL_MC_PARAM_CPE_INT = 3 /* Corrected Platform Error Int */ }; /* Encodings for rendezvous mechanisms */ enum { - SAL_MC_PARAM_MECHANISM_INT = 1, /* Use interrupt */ - SAL_MC_PARAM_MECHANISM_MEM = 2 /* Use memory synchronization variable*/ + SAL_MC_PARAM_MECHANISM_INT = 1, /* Use interrupt */ + SAL_MC_PARAM_MECHANISM_MEM = 2 /* Use memory synchronization variable*/ }; /* Encodings for vectors which can be registered by the OS with SAL */ enum { - SAL_VECTOR_OS_MCA = 0, - SAL_VECTOR_OS_INIT = 1, - SAL_VECTOR_OS_BOOT_RENDEZ = 2 + SAL_VECTOR_OS_MCA = 0, + SAL_VECTOR_OS_INIT = 1, + SAL_VECTOR_OS_BOOT_RENDEZ = 2 }; /* Encodings for mca_opt parameter sent to SAL_MC_SET_PARAMS */ @@ -239,387 +239,349 @@ #define SAL_MC_PARAM_BINIT_ESCALATE 0x10 /* -** Definition of the SAL Error Log from the SAL spec -*/ + * Definition of the SAL Error Log from the SAL spec + */ /* SAL Error Record Section GUID Definitions */ #define SAL_PROC_DEV_ERR_SECT_GUID \ - EFI_GUID ( 0xe429faf1, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, \ - 0xc7, 0x3c, 0x88, 0x81 ) + EFI_GUID(0xe429faf1, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) #define SAL_PLAT_MEM_DEV_ERR_SECT_GUID \ - EFI_GUID( 0xe429faf2, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, \ - 0xc7, 0x3c, 0x88, 0x81 ) + EFI_GUID(0xe429faf2, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) #define SAL_PLAT_SEL_DEV_ERR_SECT_GUID \ - EFI_GUID( 0xe429faf3, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, \ - 0xc7, 0x3c, 0x88, 0x81 ) + EFI_GUID(0xe429faf3, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) #define SAL_PLAT_PCI_BUS_ERR_SECT_GUID \ - EFI_GUID( 0xe429faf4, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, \ - 0xc7, 0x3c, 0x88, 0x81 ) + EFI_GUID(0xe429faf4, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) #define SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID \ - EFI_GUID( 0xe429faf5, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, \ - 0xc7, 0x3c, 0x88, 0x81 ) + EFI_GUID(0xe429faf5, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) #define SAL_PLAT_PCI_COMP_ERR_SECT_GUID \ - EFI_GUID( 0xe429faf6, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, \ - 0xc7, 0x3c, 0x88, 0x81 ) + EFI_GUID(0xe429faf6, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) #define SAL_PLAT_SPECIFIC_ERR_SECT_GUID \ - EFI_GUID( 0xe429faf7, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, \ - 0xc7, 0x3c, 0x88, 0x81 ) + EFI_GUID(0xe429faf7, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 
0x3c, 0x88, 0x81) #define SAL_PLAT_HOST_CTLR_ERR_SECT_GUID \ - EFI_GUID( 0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, \ - 0xc7, 0x3c, 0x88, 0x81 ) + EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) #define SAL_PLAT_BUS_ERR_SECT_GUID \ - EFI_GUID( 0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, \ - 0xc7, 0x3c, 0x88, 0x81 ) + EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) -#define MAX_CACHE_ERRORS 6 -#define MAX_TLB_ERRORS 6 -#define MAX_BUS_ERRORS 1 +#define MAX_CACHE_ERRORS 6 +#define MAX_TLB_ERRORS 6 +#define MAX_BUS_ERRORS 1 /* Definition of version according to SAL spec for logging purposes */ -typedef struct sal_log_revision -{ - u8 minor; /* BCD (0..99) */ - u8 major; /* BCD (0..99) */ +typedef struct sal_log_revision { + u8 minor; /* BCD (0..99) */ + u8 major; /* BCD (0..99) */ } sal_log_revision_t; /* Definition of timestamp according to SAL spec for logging purposes */ -typedef struct sal_log_timestamp -{ +typedef struct sal_log_timestamp { u8 slh_second; /* Second (0..59) */ - u8 slh_minute; /* Minute (0..59) */ - u8 slh_hour; /* Hour (0..23) */ - u8 slh_reserved; - u8 slh_day; /* Day (1..31) */ - u8 slh_month; /* Month (1..12) */ - u8 slh_year; /* Year (00..99) */ - u8 slh_century; /* Century (19, 20, 21, ...) */ + u8 slh_minute; /* Minute (0..59) */ + u8 slh_hour; /* Hour (0..23) */ + u8 slh_reserved; + u8 slh_day; /* Day (1..31) */ + u8 slh_month; /* Month (1..12) */ + u8 slh_year; /* Year (00..99) */ + u8 slh_century; /* Century (19, 20, 21, ...) */ } sal_log_timestamp_t; /* Definition of log record header structures */ -typedef struct sal_log_record_header -{ - u64 id; /* Unique monotonically increasing ID */ - sal_log_revision_t revision; /* Major and Minor revision of header */ - u16 severity; /* Error Severity */ - u32 len; /* Length of this error log in bytes */ - sal_log_timestamp_t timestamp; /* Timestamp */ - efi_guid_t platform_guid; /* Unique OEM Platform ID */ +typedef struct sal_log_record_header { + u64 id; /* Unique monotonically increasing ID */ + sal_log_revision_t revision; /* Major and Minor revision of header */ + u16 severity; /* Error Severity */ + u32 len; /* Length of this error log in bytes */ + sal_log_timestamp_t timestamp; /* Timestamp */ + efi_guid_t platform_guid; /* Unique OEM Platform ID */ } sal_log_record_header_t; /* Definition of log section header structures */ -typedef struct sal_log_sec_header -{ - efi_guid_t guid; /* Unique Section ID */ - sal_log_revision_t revision; /* Major and Minor revision of Section */ - u16 reserved; - u32 len; /* Section length */ +typedef struct sal_log_sec_header { + efi_guid_t guid; /* Unique Section ID */ + sal_log_revision_t revision; /* Major and Minor revision of Section */ + u16 reserved; + u32 len; /* Section length */ } sal_log_section_hdr_t; -typedef struct sal_log_mod_error_info -{ - struct - { - u64 check_info : 1, - requestor_identifier : 1, - responder_identifier : 1, - target_identifier : 1, - precise_ip : 1, - reserved : 59; - } valid; - u64 check_info; - u64 requestor_identifier; - u64 responder_identifier; - u64 target_identifier; - u64 precise_ip; +typedef struct sal_log_mod_error_info { + struct { + u64 check_info : 1, + requestor_identifier : 1, + responder_identifier : 1, + target_identifier : 1, + precise_ip : 1, + reserved : 59; + } valid; + u64 check_info; + u64 requestor_identifier; + u64 responder_identifier; + u64 target_identifier; + u64 precise_ip; } sal_log_mod_error_info_t; -typedef struct 
sal_processor_static_info -{ - struct - { - u64 minstate : 1, - br : 1, - cr : 1, - ar : 1, - rr : 1, - fr : 1, - reserved : 58; - } valid; - pal_min_state_area_t min_state_area; - u64 br[8]; - u64 cr[128]; - u64 ar[128]; - u64 rr[8]; - struct ia64_fpreg fr[128]; +typedef struct sal_processor_static_info { + struct { + u64 minstate : 1, + br : 1, + cr : 1, + ar : 1, + rr : 1, + fr : 1, + reserved : 58; + } valid; + pal_min_state_area_t min_state_area; + u64 br[8]; + u64 cr[128]; + u64 ar[128]; + u64 rr[8]; + struct ia64_fpreg fr[128]; } sal_processor_static_info_t; -typedef struct sal_log_processor_info -{ - sal_log_section_hdr_t header; - struct - { - u64 proc_error_map : 1, - proc_state_param : 1, - proc_cr_lid : 1, - psi_static_struct : 1, - num_cache_check : 4, - num_tlb_check : 4, - num_bus_check : 4, - num_reg_file_check : 4, - num_ms_check : 4, - cpuid_info : 1, - reserved1 : 39; - } valid; - u64 proc_error_map; - u64 proc_state_parameter; - u64 proc_cr_lid; - sal_log_mod_error_info_t cache_check_info[16]; - sal_log_mod_error_info_t tlb_check_info[16]; - sal_log_mod_error_info_t bus_check_info[16]; - sal_log_mod_error_info_t reg_file_check_info[16]; - sal_log_mod_error_info_t ms_check_info[16]; - struct - { - u64 regs[5]; - u64 reserved; - } cpuid_info; - sal_processor_static_info_t processor_static_info; +typedef struct sal_log_processor_info { + sal_log_section_hdr_t header; + struct { + u64 proc_error_map : 1, + proc_state_param : 1, + proc_cr_lid : 1, + psi_static_struct : 1, + num_cache_check : 4, + num_tlb_check : 4, + num_bus_check : 4, + num_reg_file_check : 4, + num_ms_check : 4, + cpuid_info : 1, + reserved1 : 39; + } valid; + u64 proc_error_map; + u64 proc_state_parameter; + u64 proc_cr_lid; + sal_log_mod_error_info_t cache_check_info[16]; + sal_log_mod_error_info_t tlb_check_info[16]; + sal_log_mod_error_info_t bus_check_info[16]; + sal_log_mod_error_info_t reg_file_check_info[16]; + sal_log_mod_error_info_t ms_check_info[16]; + struct { + u64 regs[5]; + u64 reserved; + } cpuid_info; + sal_processor_static_info_t processor_static_info; } sal_log_processor_info_t; /* platform error log structures */ -typedef struct sal_log_mem_dev_err_info -{ - sal_log_section_hdr_t header; - struct - { - u64 error_status : 1, - physical_addr : 1, - addr_mask : 1, - node : 1, - card : 1, - module : 1, - bank : 1, - device : 1, - row : 1, - column : 1, - bit_position : 1, - requestor_id : 1, - responder_id : 1, - target_id : 1, - bus_spec_data : 1, - oem_id : 1, - oem_data : 1, - reserved : 47; - } valid; - u64 error_status; - u64 physical_addr; - u64 addr_mask; - u16 node; - u16 card; - u16 module; - u16 bank; - u16 device; - u16 row; - u16 column; - u16 bit_position; - u64 requestor_id; - u64 responder_id; - u64 target_id; - u64 bus_spec_data; - u8 oem_id[16]; - u8 oem_data[1]; /* Variable length data */ +typedef struct sal_log_mem_dev_err_info { + sal_log_section_hdr_t header; + struct { + u64 error_status : 1, + physical_addr : 1, + addr_mask : 1, + node : 1, + card : 1, + module : 1, + bank : 1, + device : 1, + row : 1, + column : 1, + bit_position : 1, + requestor_id : 1, + responder_id : 1, + target_id : 1, + bus_spec_data : 1, + oem_id : 1, + oem_data : 1, + reserved : 47; + } valid; + u64 error_status; + u64 physical_addr; + u64 addr_mask; + u16 node; + u16 card; + u16 module; + u16 bank; + u16 device; + u16 row; + u16 column; + u16 bit_position; + u64 requestor_id; + u64 responder_id; + u64 target_id; + u64 bus_spec_data; + u8 oem_id[16]; + u8 oem_data[1]; /* Variable length data 
*/ } sal_log_mem_dev_err_info_t; -typedef struct sal_log_sel_dev_err_info -{ - sal_log_section_hdr_t header; - struct - { - u64 record_id : 1, - record_type : 1, - generator_id : 1, - evm_rev : 1, - sensor_type : 1, - sensor_num : 1, - event_dir : 1, - event_data1 : 1, - event_data2 : 1, - event_data3 : 1, - reserved : 54; - } valid; - u16 record_id; - u8 record_type; - u8 timestamp[4]; - u16 generator_id; - u8 evm_rev; - u8 sensor_type; - u8 sensor_num; - u8 event_dir; - u8 event_data1; - u8 event_data2; - u8 event_data3; +typedef struct sal_log_sel_dev_err_info { + sal_log_section_hdr_t header; + struct { + u64 record_id : 1, + record_type : 1, + generator_id : 1, + evm_rev : 1, + sensor_type : 1, + sensor_num : 1, + event_dir : 1, + event_data1 : 1, + event_data2 : 1, + event_data3 : 1, + reserved : 54; + } valid; + u16 record_id; + u8 record_type; + u8 timestamp[4]; + u16 generator_id; + u8 evm_rev; + u8 sensor_type; + u8 sensor_num; + u8 event_dir; + u8 event_data1; + u8 event_data2; + u8 event_data3; } sal_log_sel_dev_err_info_t; -typedef struct sal_log_pci_bus_err_info -{ - sal_log_section_hdr_t header; - struct - { - u64 err_status : 1, - err_type : 1, - bus_id : 1, - bus_address : 1, - bus_data : 1, - bus_cmd : 1, - requestor_id : 1, - responder_id : 1, - target_id : 1, - oem_data : 1, - reserved : 54; - } valid; - u64 err_status; - u16 err_type; - u16 bus_id; - u32 reserved; - u64 bus_address; - u64 bus_data; - u64 bus_cmd; - u64 requestor_id; - u64 responder_id; - u64 target_id; - u8 oem_data[1]; /* Variable length data */ +typedef struct sal_log_pci_bus_err_info { + sal_log_section_hdr_t header; + struct { + u64 err_status : 1, + err_type : 1, + bus_id : 1, + bus_address : 1, + bus_data : 1, + bus_cmd : 1, + requestor_id : 1, + responder_id : 1, + target_id : 1, + oem_data : 1, + reserved : 54; + } valid; + u64 err_status; + u16 err_type; + u16 bus_id; + u32 reserved; + u64 bus_address; + u64 bus_data; + u64 bus_cmd; + u64 requestor_id; + u64 responder_id; + u64 target_id; + u8 oem_data[1]; /* Variable length data */ } sal_log_pci_bus_err_info_t; -typedef struct sal_log_smbios_dev_err_info -{ - sal_log_section_hdr_t header; - struct - { - u64 event_type : 1, - length : 1, - time_stamp : 1, - data : 1, - reserved1 : 60; - } valid; - u8 event_type; - u8 length; - u8 time_stamp[6]; - u8 data[1]; // data of variable length, length == slsmb_length +typedef struct sal_log_smbios_dev_err_info { + sal_log_section_hdr_t header; + struct { + u64 event_type : 1, + length : 1, + time_stamp : 1, + data : 1, + reserved1 : 60; + } valid; + u8 event_type; + u8 length; + u8 time_stamp[6]; + u8 data[1]; /* data of variable length, length == slsmb_length */ } sal_log_smbios_dev_err_info_t; -typedef struct sal_log_pci_comp_err_info -{ - sal_log_section_hdr_t header; - struct - { - u64 err_status : 1, - comp_info : 1, - num_mem_regs : 1, - num_io_regs : 1, - reg_data_pairs : 1, - oem_data : 1, - reserved : 58; - } valid; - u64 err_status; - struct - { - u16 vendor_id; - u16 device_id; - u8 class_code[3]; - u8 func_num; - u8 dev_num; - u8 bus_num; - u8 seg_num; - u8 reserved[5]; - } comp_info; - u32 num_mem_regs; - u32 num_io_regs; - u64 reg_data_pairs[1]; - /* array of address/data register pairs is num_mem_regs + num_io_regs - elements long. Each array element consists of a u64 address followed - by a u64 data value. 
The oem_data array immediately follows the - reg_data_pairs array */ - u8 oem_data[1]; /* Variable length data */ +typedef struct sal_log_pci_comp_err_info { + sal_log_section_hdr_t header; + struct { + u64 err_status : 1, + comp_info : 1, + num_mem_regs : 1, + num_io_regs : 1, + reg_data_pairs : 1, + oem_data : 1, + reserved : 58; + } valid; + u64 err_status; + struct { + u16 vendor_id; + u16 device_id; + u8 class_code[3]; + u8 func_num; + u8 dev_num; + u8 bus_num; + u8 seg_num; + u8 reserved[5]; + } comp_info; + u32 num_mem_regs; + u32 num_io_regs; + u64 reg_data_pairs[1]; + /* + * array of address/data register pairs is num_mem_regs + num_io_regs elements + * long. Each array element consists of a u64 address followed by a u64 data + * value. The oem_data array immediately follows the reg_data_pairs array + */ + u8 oem_data[1]; /* Variable length data */ } sal_log_pci_comp_err_info_t; -typedef struct sal_log_plat_specific_err_info -{ - sal_log_section_hdr_t header; - struct - { - u64 err_status : 1, - guid : 1, - oem_data : 1, - reserved : 61; - } valid; - u64 err_status; - efi_guid_t guid; - u8 oem_data[1]; /* platform specific variable length data */ +typedef struct sal_log_plat_specific_err_info { + sal_log_section_hdr_t header; + struct { + u64 err_status : 1, + guid : 1, + oem_data : 1, + reserved : 61; + } valid; + u64 err_status; + efi_guid_t guid; + u8 oem_data[1]; /* platform specific variable length data */ } sal_log_plat_specific_err_info_t; -typedef struct sal_log_host_ctlr_err_info -{ - sal_log_section_hdr_t header; - struct - { - u64 err_status : 1, - requestor_id : 1, - responder_id : 1, - target_id : 1, - bus_spec_data : 1, - oem_data : 1, - reserved : 58; - } valid; - u64 err_status; - u64 requestor_id; - u64 responder_id; - u64 target_id; - u64 bus_spec_data; - u8 oem_data[1]; /* Variable length OEM data */ +typedef struct sal_log_host_ctlr_err_info { + sal_log_section_hdr_t header; + struct { + u64 err_status : 1, + requestor_id : 1, + responder_id : 1, + target_id : 1, + bus_spec_data : 1, + oem_data : 1, + reserved : 58; + } valid; + u64 err_status; + u64 requestor_id; + u64 responder_id; + u64 target_id; + u64 bus_spec_data; + u8 oem_data[1]; /* Variable length OEM data */ } sal_log_host_ctlr_err_info_t; -typedef struct sal_log_plat_bus_err_info -{ - sal_log_section_hdr_t header; - struct - { - u64 err_status : 1, - requestor_id : 1, - responder_id : 1, - target_id : 1, - bus_spec_data : 1, - oem_data : 1, - reserved : 58; - } valid; - u64 err_status; - u64 requestor_id; - u64 responder_id; - u64 target_id; - u64 bus_spec_data; - u8 oem_data[1]; /* Variable length OEM data */ +typedef struct sal_log_plat_bus_err_info { + sal_log_section_hdr_t header; + struct { + u64 err_status : 1, + requestor_id : 1, + responder_id : 1, + target_id : 1, + bus_spec_data : 1, + oem_data : 1, + reserved : 58; + } valid; + u64 err_status; + u64 requestor_id; + u64 responder_id; + u64 target_id; + u64 bus_spec_data; + u8 oem_data[1]; /* Variable length OEM data */ } sal_log_plat_bus_err_info_t; /* Overall platform error section structure */ -typedef union sal_log_platform_err_info -{ - sal_log_mem_dev_err_info_t mem_dev_err; - sal_log_sel_dev_err_info_t sel_dev_err; - sal_log_pci_bus_err_info_t pci_bus_err; - sal_log_smbios_dev_err_info_t smbios_dev_err; - sal_log_pci_comp_err_info_t pci_comp_err; - sal_log_plat_specific_err_info_t plat_specific_err; - sal_log_host_ctlr_err_info_t host_ctlr_err; - sal_log_plat_bus_err_info_t plat_bus_err; +typedef union sal_log_platform_err_info { + 
sal_log_mem_dev_err_info_t mem_dev_err; + sal_log_sel_dev_err_info_t sel_dev_err; + sal_log_pci_bus_err_info_t pci_bus_err; + sal_log_smbios_dev_err_info_t smbios_dev_err; + sal_log_pci_comp_err_info_t pci_comp_err; + sal_log_plat_specific_err_info_t plat_specific_err; + sal_log_host_ctlr_err_info_t host_ctlr_err; + sal_log_plat_bus_err_info_t plat_bus_err; } sal_log_platform_err_info_t; /* SAL log over-all, multi-section error record structure (processor+platform) */ -typedef struct err_rec -{ - sal_log_record_header_t sal_elog_header; - sal_log_processor_info_t proc_err; - sal_log_platform_err_info_t plat_err; - u8 oem_data_pad[1024]; +typedef struct err_rec { + sal_log_record_header_t sal_elog_header; + sal_log_processor_info_t proc_err; + sal_log_platform_err_info_t plat_err; + u8 oem_data_pad[1024]; } ia64_err_rec_t; /* @@ -648,7 +610,6 @@ } - /* Initialize all the processor and platform level instruction and data caches */ static inline s64 ia64_sal_cache_init (void) @@ -658,8 +619,9 @@ return isrv.status; } -/* Clear the processor and platform information logged by SAL with respect to the - * machine state at the time of MCA's, INITs, CMCs, or CPEs. +/* + * Clear the processor and platform information logged by SAL with respect to the machine + * state at the time of MCA's, INITs, CMCs, or CPEs. */ static inline s64 ia64_sal_clear_state_info (u64 sal_info_type) @@ -684,9 +646,11 @@ return 0; return isrv.v0; -} -/* Get the maximum size of the information logged by SAL with respect to the machine - * state at the time of MCAs, INITs, CMCs, or CPEs. +} + +/* + * Get the maximum size of the information logged by SAL with respect to the machine state + * at the time of MCAs, INITs, CMCs, or CPEs. */ static inline u64 ia64_sal_get_state_info_size (u64 sal_info_type) @@ -699,8 +663,9 @@ return isrv.v0; } -/* Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup - * from the monarch processor. +/* + * Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup from + * the monarch processor. */ static inline s64 ia64_sal_mc_rendez (void) @@ -710,15 +675,17 @@ return isrv.status; } -/* Allow the OS to specify the interrupt number to be used by SAL to interrupt OS during - * the machine check rendezvous sequence as well as the mechanism to wake up the +/* + * Allow the OS to specify the interrupt number to be used by SAL to interrupt OS during + * the machine check rendezvous sequence as well as the mechanism to wake up the * non-monarch processor at the end of machine check processing. */ static inline s64 ia64_sal_mc_set_params (u64 param_type, u64 i_or_m, u64 i_or_m_val, u64 timeout, u64 rz_always) { struct ia64_sal_retval isrv; - SAL_CALL(isrv, SAL_MC_SET_PARAMS, param_type, i_or_m, i_or_m_val, timeout, rz_always, 0, 0); + SAL_CALL(isrv, SAL_MC_SET_PARAMS, param_type, i_or_m, i_or_m_val, + timeout, rz_always, 0, 0); return isrv.status; } @@ -744,8 +711,8 @@ } /* - * Register physical addresses of locations needed by SAL when SAL - * procedures are invoked in virtual mode. + * Register physical addresses of locations needed by SAL when SAL procedures are invoked + * in virtual mode. */ static inline s64 ia64_sal_register_physical_addr (u64 phys_entry, u64 phys_addr) @@ -756,9 +723,10 @@ return isrv.status; } -/* Register software dependent code locations within SAL. These locations are handlers - * or entry points where SAL will pass control for the specified event. 
These event - * handlers are for the bott rendezvous, MCAs and INIT scenarios. +/* + * Register software dependent code locations within SAL. These locations are handlers or + * entry points where SAL will pass control for the specified event. These event handlers + * are for the bott rendezvous, MCAs and INIT scenarios. */ static inline s64 ia64_sal_set_vectors (u64 vector_type, @@ -768,10 +736,11 @@ struct ia64_sal_retval isrv; SAL_CALL(isrv, SAL_SET_VECTORS, vector_type, handler_addr1, gp1, handler_len1, - handler_addr2, gp2, handler_len2); + handler_addr2, gp2, handler_len2); return isrv.status; -} +} + /* Update the contents of PAL block in the non-volatile storage device */ static inline s64 ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size, diff -Nru a/include/asm-ia64/serial.h b/include/asm-ia64/serial.h --- a/include/asm-ia64/serial.h Wed Feb 12 14:47:57 2003 +++ b/include/asm-ia64/serial.h Wed Feb 12 14:47:57 2003 @@ -59,7 +59,6 @@ { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ - #ifdef CONFIG_SERIAL_MANY_PORTS #define EXTRA_SERIAL_PORT_DEFNS \ { 0, BASE_BAUD, 0x1A0, 9, FOURPORT_FLAGS }, /* ttyS4 */ \ diff -Nru a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h --- a/include/asm-ia64/unistd.h Wed Feb 12 14:47:57 2003 +++ b/include/asm-ia64/unistd.h Wed Feb 12 14:47:57 2003 @@ -223,7 +223,7 @@ #define __NR_sched_setaffinity 1231 #define __NR_sched_getaffinity 1232 #define __NR_set_tid_address 1233 -/* 1234 available for reuse */ +#define __NR_fadvise64 1234 /* 1235 available for reuse */ #define __NR_exit_group 1236 #define __NR_lookup_dcookie 1237 diff -Nru a/include/asm-sparc64/agp.h b/include/asm-sparc64/agp.h --- a/include/asm-sparc64/agp.h Wed Feb 12 14:47:57 2003 +++ b/include/asm-sparc64/agp.h Wed Feb 12 14:47:57 2003 @@ -8,4 +8,11 @@ #define flush_agp_mappings() #define flush_agp_cache() mb() +/* + * Page-protection value to be used for AGP memory mapped into kernel space. For + * platforms which use coherent AGP DMA, this can be PAGE_KERNEL. For others, it needs to + * be an uncached mapping (such as write-combining). + */ +#define PAGE_AGP PAGE_KERNEL_NOCACHE + #endif diff -Nru a/include/asm-x86_64/agp.h b/include/asm-x86_64/agp.h --- a/include/asm-x86_64/agp.h Wed Feb 12 14:47:57 2003 +++ b/include/asm-x86_64/agp.h Wed Feb 12 14:47:57 2003 @@ -20,4 +20,11 @@ worth it. Would need a page for it. */ #define flush_agp_cache() asm volatile("wbinvd":::"memory") +/* + * Page-protection value to be used for AGP memory mapped into kernel space. For + * platforms which use coherent AGP DMA, this can be PAGE_KERNEL. For others, it needs to + * be an uncached mapping (such as write-combining). 
+ */ +#define PAGE_AGP PAGE_KERNEL_NOCACHE + #endif diff -Nru a/include/linux/acpi_serial.h b/include/linux/acpi_serial.h --- a/include/linux/acpi_serial.h Wed Feb 12 14:47:58 2003 +++ b/include/linux/acpi_serial.h Wed Feb 12 14:47:58 2003 @@ -9,6 +9,8 @@ * */ +#include + extern void setup_serial_acpi(void *); #define ACPI_SIG_LEN 4 diff -Nru a/include/linux/agp_backend.h b/include/linux/agp_backend.h --- a/include/linux/agp_backend.h Wed Feb 12 14:47:58 2003 +++ b/include/linux/agp_backend.h Wed Feb 12 14:47:58 2003 @@ -53,17 +53,6 @@ INTEL_460GX, INTEL_I7505, VIA_GENERIC, - VIA_VP3, - VIA_MVP3, - VIA_MVP4, - VIA_APOLLO_PRO, - VIA_APOLLO_KX133, - VIA_APOLLO_KT133, - VIA_APOLLO_KT400, - VIA_APOLLO_KT400_3, - VIA_APOLLO_PRO_266, - VIA_VT8605, - VIA_P4X, SIS_GENERIC, AMD_GENERIC, AMD_IRONGATE, @@ -84,6 +73,7 @@ SVWRKS_LE, SVWRKS_GENERIC, HP_ZX1, + ALPHA_CORE_AGP, }; struct agp_version { @@ -102,6 +92,7 @@ int current_memory; int cant_use_aperture; unsigned long page_mask; + struct vm_operations_struct *vm_ops; } agp_kern_info; /* diff -Nru a/include/linux/buffer_head.h b/include/linux/buffer_head.h --- a/include/linux/buffer_head.h Wed Feb 12 14:47:57 2003 +++ b/include/linux/buffer_head.h Wed Feb 12 14:47:57 2003 @@ -169,6 +169,7 @@ void free_buffer_head(struct buffer_head * bh); void FASTCALL(unlock_buffer(struct buffer_head *bh)); void ll_rw_block(int, int, struct buffer_head * bh[]); +void sync_dirty_buffer(struct buffer_head *bh); int submit_bh(int, struct buffer_head *); void write_boundary_block(struct block_device *bdev, sector_t bblock, unsigned blocksize); diff -Nru a/include/linux/hfs_sysdep.h b/include/linux/hfs_sysdep.h --- a/include/linux/hfs_sysdep.h Wed Feb 12 14:47:57 2003 +++ b/include/linux/hfs_sysdep.h Wed Feb 12 14:47:57 2003 @@ -155,13 +155,8 @@ } static inline void hfs_buffer_sync(hfs_buffer buffer) { - while (buffer_locked(buffer)) { - wait_on_buffer(buffer); - } - if (buffer_dirty(buffer)) { - ll_rw_block(WRITE, 1, &buffer); - wait_on_buffer(buffer); - } + if (buffer_dirty(buffer)) + sync_dirty_buffer(buffer); } static inline void *hfs_buffer_data(const hfs_buffer buffer) { diff -Nru a/include/linux/highmem.h b/include/linux/highmem.h --- a/include/linux/highmem.h Wed Feb 12 14:47:58 2003 +++ b/include/linux/highmem.h Wed Feb 12 14:47:58 2003 @@ -3,6 +3,8 @@ #include #include +#include + #include #ifdef CONFIG_HIGHMEM diff -Nru a/include/linux/irq.h b/include/linux/irq.h --- a/include/linux/irq.h Wed Feb 12 14:47:57 2003 +++ b/include/linux/irq.h Wed Feb 12 14:47:57 2003 @@ -56,15 +56,13 @@ * * Pad this out to 32 bytes for cache and indexing reasons. 
*/ -typedef struct { +typedef struct irq_desc { unsigned int status; /* IRQ status */ hw_irq_controller *handler; struct irqaction *action; /* IRQ action list */ unsigned int depth; /* nested irq disables */ spinlock_t lock; } ____cacheline_aligned irq_desc_t; - -extern irq_desc_t irq_desc [NR_IRQS]; #include /* the arch dependent stuff */ diff -Nru a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h --- a/include/linux/irq_cpustat.h Wed Feb 12 14:47:58 2003 +++ b/include/linux/irq_cpustat.h Wed Feb 12 14:47:58 2003 @@ -24,7 +24,7 @@ #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member) #else #define __IRQ_STAT(cpu, member) ((void)(cpu), irq_stat[0].member) -#endif +#endif #endif /* arch independent irq_stat fields */ @@ -33,5 +33,10 @@ #define ksoftirqd_task(cpu) __IRQ_STAT((cpu), __ksoftirqd_task) /* arch dependent irq_stat fields */ #define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386, ia64 */ + +#define local_softirq_pending() softirq_pending(smp_processor_id()) +#define local_syscall_count() syscall_count(smp_processor_id()) +#define local_ksoftirqd_task() ksoftirqd_task(smp_processor_id()) +#define local_nmi_count() nmi_count(smp_processor_id()) #endif /* __irq_cpustat_h */ diff -Nru a/include/linux/percpu.h b/include/linux/percpu.h --- a/include/linux/percpu.h Wed Feb 12 14:47:58 2003 +++ b/include/linux/percpu.h Wed Feb 12 14:47:58 2003 @@ -1,9 +1,8 @@ #ifndef __LINUX_PERCPU_H #define __LINUX_PERCPU_H -#include /* For preempt_disable() */ +#include /* For preempt_disable() */ #include /* For kmalloc_percpu() */ #include - /* Must be an lvalue. */ #define get_cpu_var(var) (*({ preempt_disable(); &__get_cpu_var(var); })) #define put_cpu_var(var) preempt_enable() diff -Nru a/include/linux/ptrace.h b/include/linux/ptrace.h --- a/include/linux/ptrace.h Wed Feb 12 14:47:57 2003 +++ b/include/linux/ptrace.h Wed Feb 12 14:47:57 2003 @@ -4,6 +4,7 @@ /* structs and defines to help the user use the ptrace system call. */ #include +#include /* has the defines to get at the registers. 
*/ diff -Nru a/include/linux/sched.h b/include/linux/sched.h --- a/include/linux/sched.h Wed Feb 12 14:47:57 2003 +++ b/include/linux/sched.h Wed Feb 12 14:47:57 2003 @@ -148,8 +148,8 @@ extern void init_idle(task_t *idle, int cpu); extern void show_state(void); -extern void show_trace(unsigned long *stack); -extern void show_stack(unsigned long *stack); +extern void show_trace(struct task_struct *); +extern void show_stack(struct task_struct *); extern void show_regs(struct pt_regs *); void io_schedule(void); @@ -490,14 +490,14 @@ #ifndef INIT_THREAD_SIZE # define INIT_THREAD_SIZE 2048*sizeof(long) -#endif - union thread_union { struct thread_info thread_info; unsigned long stack[INIT_THREAD_SIZE/sizeof(long)]; }; extern union thread_union init_thread_union; +#endif + extern struct task_struct init_task; extern struct mm_struct init_mm; diff -Nru a/include/linux/serial.h b/include/linux/serial.h --- a/include/linux/serial.h Wed Feb 12 14:47:57 2003 +++ b/include/linux/serial.h Wed Feb 12 14:47:57 2003 @@ -179,14 +179,9 @@ extern int register_serial(struct serial_struct *req); extern void unregister_serial(int line); -/* Allow complicated architectures to specify rs_table[] at run time */ -extern int early_serial_setup(struct serial_struct *req); - -#ifdef CONFIG_ACPI -/* tty ports reserved for the ACPI serial console port and debug port */ -#define ACPI_SERIAL_CONSOLE_PORT 4 -#define ACPI_SERIAL_DEBUG_PORT 5 -#endif +/* Allow architectures to override entries in serial8250_ports[] at run time: */ +struct uart_port; /* forward declaration */ +extern int early_serial_setup(struct uart_port *port); #endif /* __KERNEL__ */ #endif /* _LINUX_SERIAL_H */ diff -Nru a/include/linux/smp.h b/include/linux/smp.h --- a/include/linux/smp.h Wed Feb 12 14:47:58 2003 +++ b/include/linux/smp.h Wed Feb 12 14:47:58 2003 @@ -58,10 +58,6 @@ */ extern int smp_threads_ready; -extern volatile unsigned long smp_msg_data; -extern volatile int smp_src_cpu; -extern volatile int smp_msg_id; - #define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */ #define MSG_ALL 0x8001 diff -Nru a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h --- a/include/linux/sunrpc/svc.h Wed Feb 12 14:47:58 2003 +++ b/include/linux/sunrpc/svc.h Wed Feb 12 14:47:58 2003 @@ -73,7 +73,7 @@ * This assumes that the non-page part of an rpc reply will fit * in a page - NFSd ensures this. lockd also has no trouble. 
*/ -#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE + 1) +#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE + 2) static inline u32 svc_getu32(struct iovec *iov) { diff -Nru a/kernel/fork.c b/kernel/fork.c --- a/kernel/fork.c Wed Feb 12 14:47:57 2003 +++ b/kernel/fork.c Wed Feb 12 14:47:57 2003 @@ -72,6 +72,7 @@ return total; } +#if 0 void __put_task_struct(struct task_struct *tsk) { if (tsk != current) { @@ -89,6 +90,7 @@ put_cpu(); } } +#endif void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait) { @@ -191,7 +193,11 @@ init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2; } -static struct task_struct *dup_task_struct(struct task_struct *orig) +#if 1 +extern struct task_struct *dup_task_struct (struct task_struct *orig); +#else + +struct task_struct *dup_task_struct(struct task_struct *orig) { struct task_struct *tsk; struct thread_info *ti; @@ -221,6 +227,8 @@ return tsk; } +#endif + #ifdef CONFIG_MMU static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm) { @@ -859,11 +867,15 @@ if (clone_flags & CLONE_CHILD_SETTID) p->set_child_tid = child_tidptr; + else + p->set_child_tid = NULL; /* * Clear TID on mm_release()? */ if (clone_flags & CLONE_CHILD_CLEARTID) p->clear_child_tid = child_tidptr; + else + p->clear_child_tid = NULL; /* * Syscall tracing should be turned off in the child regardless diff -Nru a/kernel/ksyms.c b/kernel/ksyms.c --- a/kernel/ksyms.c Wed Feb 12 14:47:57 2003 +++ b/kernel/ksyms.c Wed Feb 12 14:47:57 2003 @@ -208,6 +208,7 @@ EXPORT_SYMBOL(__brelse); EXPORT_SYMBOL(__bforget); EXPORT_SYMBOL(ll_rw_block); +EXPORT_SYMBOL(sync_dirty_buffer); EXPORT_SYMBOL(submit_bh); EXPORT_SYMBOL(unlock_buffer); EXPORT_SYMBOL(__wait_on_buffer); @@ -402,7 +403,9 @@ EXPORT_SYMBOL(del_timer); EXPORT_SYMBOL(request_irq); EXPORT_SYMBOL(free_irq); +#if !defined(CONFIG_IA64) EXPORT_SYMBOL(irq_stat); +#endif /* waitqueue handling */ EXPORT_SYMBOL(add_wait_queue); @@ -599,7 +602,9 @@ /* init task, for moving kthread roots - ought to export a function ?? */ EXPORT_SYMBOL(init_task); +#ifndef CONFIG_IA64 EXPORT_SYMBOL(init_thread_union); +#endif EXPORT_SYMBOL(tasklist_lock); EXPORT_SYMBOL(find_task_by_pid); diff -Nru a/kernel/printk.c b/kernel/printk.c --- a/kernel/printk.c Wed Feb 12 14:47:58 2003 +++ b/kernel/printk.c Wed Feb 12 14:47:58 2003 @@ -305,6 +305,12 @@ __call_console_drivers(start, end); } } +#ifdef CONFIG_IA64_EARLY_PRINTK + if (!console_drivers) { + void early_printk (const char *str, size_t len); + early_printk(&LOG_BUF(start), end - start); + } +#endif } /* @@ -622,7 +628,11 @@ * for us. 
*/ spin_lock_irqsave(&logbuf_lock, flags); +#ifdef CONFIG_IA64_EARLY_PRINTK + con_start = log_end; +#else con_start = log_start; +#endif spin_unlock_irqrestore(&logbuf_lock, flags); } release_console_sem(); @@ -675,3 +685,110 @@ tty->driver.write(tty, 0, msg, strlen(msg)); return; } + +#ifdef CONFIG_IA64_EARLY_PRINTK + +#include + +# ifdef CONFIG_IA64_EARLY_PRINTK_VGA + + +#define VGABASE ((char *)0xc0000000000b8000) +#define VGALINES 24 +#define VGACOLS 80 + +static int current_ypos = VGALINES, current_xpos = 0; + +static void +early_printk_vga (const char *str, size_t len) +{ + char c; + int i, k, j; + + while (len-- > 0) { + c = *str++; + if (current_ypos >= VGALINES) { + /* scroll 1 line up */ + for (k = 1, j = 0; k < VGALINES; k++, j++) { + for (i = 0; i < VGACOLS; i++) { + writew(readw(VGABASE + 2*(VGACOLS*k + i)), + VGABASE + 2*(VGACOLS*j + i)); + } + } + for (i = 0; i < VGACOLS; i++) { + writew(0x720, VGABASE + 2*(VGACOLS*j + i)); + } + current_ypos = VGALINES-1; + } + if (c == '\n') { + current_xpos = 0; + current_ypos++; + } else if (c != '\r') { + writew(((0x7 << 8) | (unsigned short) c), + VGABASE + 2*(VGACOLS*current_ypos + current_xpos++)); + if (current_xpos >= VGACOLS) { + current_xpos = 0; + current_ypos++; + } + } + } +} + +# endif /* CONFIG_IA64_EARLY_PRINTK_VGA */ + +# ifdef CONFIG_IA64_EARLY_PRINTK_UART + +#include +#include + +static void early_printk_uart(const char *str, size_t len) +{ + static char *uart = NULL; + unsigned long uart_base; + char c; + + if (!uart) { + uart_base = 0; +# ifdef CONFIG_SERIAL_8250_HCDP + { + extern unsigned long hcdp_early_uart(void); + uart_base = hcdp_early_uart(); + } +# endif +# if CONFIG_IA64_EARLY_PRINTK_UART_BASE + if (!uart_base) + uart_base = CONFIG_IA64_EARLY_PRINTK_UART_BASE; +# endif + if (!uart_base) + return; + + uart = ioremap(uart_base, 64); + if (!uart) + return; + } + + while (len-- > 0) { + c = *str++; + while ((readb(uart + UART_LSR) & UART_LSR_TEMT) == 0) + cpu_relax(); /* spin */ + + writeb(c, uart + UART_TX); + + if (c == '\n') + writeb('\r', uart + UART_TX); + } +} + +# endif /* CONFIG_IA64_EARLY_PRINTK_UART */ + +void early_printk(const char *str, size_t len) +{ +#ifdef CONFIG_IA64_EARLY_PRINTK_UART + early_printk_uart(str, len); +#endif +#ifdef CONFIG_IA64_EARLY_PRINTK_VGA + early_printk_vga(str, len); +#endif +} + +#endif /* CONFIG_IA64_EARLY_PRINTK */ diff -Nru a/kernel/softirq.c b/kernel/softirq.c --- a/kernel/softirq.c Wed Feb 12 14:47:57 2003 +++ b/kernel/softirq.c Wed Feb 12 14:47:57 2003 @@ -32,7 +32,10 @@ - Tasklets: serialized wrt itself. 
*/ +/* No separate irq_stat for ia64, it is part of PSA */ +#if !defined(CONFIG_IA64) irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned; +#endif /* CONFIG_IA64 */ static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp; @@ -63,7 +66,7 @@ local_irq_save(flags); cpu = smp_processor_id(); - pending = softirq_pending(cpu); + pending = local_softirq_pending(); if (pending) { struct softirq_action *h; @@ -72,7 +75,7 @@ local_bh_disable(); restart: /* Reset the pending bitmask before enabling irqs */ - softirq_pending(cpu) = 0; + local_softirq_pending() = 0; local_irq_enable(); @@ -87,7 +90,7 @@ local_irq_disable(); - pending = softirq_pending(cpu); + pending = local_softirq_pending(); if (pending & mask) { mask &= ~pending; goto restart; @@ -95,7 +98,7 @@ __local_bh_enable(); if (pending) - wakeup_softirqd(cpu); + wakeup_softirqd(smp_processor_id()); } local_irq_restore(flags); @@ -315,15 +318,15 @@ __set_current_state(TASK_INTERRUPTIBLE); mb(); - ksoftirqd_task(cpu) = current; + local_ksoftirqd_task() = current; for (;;) { - if (!softirq_pending(cpu)) + if (!local_softirq_pending()) schedule(); __set_current_state(TASK_RUNNING); - while (softirq_pending(cpu)) { + while (local_softirq_pending()) { do_softirq(); cond_resched(); } diff -Nru a/kernel/time.c b/kernel/time.c --- a/kernel/time.c Wed Feb 12 14:47:57 2003 +++ b/kernel/time.c Wed Feb 12 14:47:57 2003 @@ -36,7 +36,7 @@ */ struct timezone sys_tz; -extern unsigned long last_time_offset; +extern unsigned long last_nsec_offset; #if !defined(__alpha__) && !defined(__ia64__) @@ -80,7 +80,7 @@ write_seqlock_irq(&xtime_lock); xtime.tv_sec = value; xtime.tv_nsec = 0; - last_time_offset = 0; + last_nsec_offset = 0; time_adjust = 0; /* stop active adjtime() */ time_status |= STA_UNSYNC; time_maxerror = NTP_PHASE_LIMIT; @@ -126,7 +126,7 @@ { write_seqlock_irq(&xtime_lock); xtime.tv_sec += sys_tz.tz_minuteswest * 60; - last_time_offset = 0; + last_nsec_offset = 0; write_sequnlock_irq(&xtime_lock); } @@ -382,7 +382,7 @@ txc->calcnt = pps_calcnt; txc->errcnt = pps_errcnt; txc->stbcnt = pps_stbcnt; - last_time_offset = 0; + last_nsec_offset = 0; write_sequnlock_irq(&xtime_lock); do_gettimeofday(&txc->time); return(result); diff -Nru a/kernel/timer.c b/kernel/timer.c --- a/kernel/timer.c Wed Feb 12 14:47:58 2003 +++ b/kernel/timer.c Wed Feb 12 14:47:58 2003 @@ -442,6 +442,7 @@ /* The current time */ struct timespec xtime __attribute__ ((aligned (16))); +unsigned long last_nsec_offset; /* Don't completely fail for HZ > 500. */ int tickadj = 500/HZ ? : 1; /* microsecs */ @@ -592,7 +593,7 @@ /* in the NTP reference this is called "hardclock()" */ static void update_wall_time_one_tick(void) { - long time_adjust_step; + long time_adjust_step, delta_nsec; if ( (time_adjust_step = time_adjust) != 0 ) { /* We are doing an adjtime thing. @@ -608,11 +609,11 @@ time_adjust_step = tickadj; else if (time_adjust < -tickadj) time_adjust_step = -tickadj; - + /* Reduce by this step the amount of time left */ time_adjust -= time_adjust_step; } - xtime.tv_nsec += tick_nsec + time_adjust_step * 1000; + delta_nsec = tick_nsec + time_adjust_step * 1000; /* * Advance the phase, once it gets to one microsecond, then * advance the tick more. 
@@ -621,13 +622,33 @@ if (time_phase <= -FINEUSEC) { long ltemp = -time_phase >> (SHIFT_SCALE - 10); time_phase += ltemp << (SHIFT_SCALE - 10); - xtime.tv_nsec -= ltemp; + delta_nsec -= ltemp; } else if (time_phase >= FINEUSEC) { long ltemp = time_phase >> (SHIFT_SCALE - 10); time_phase -= ltemp << (SHIFT_SCALE - 10); - xtime.tv_nsec += ltemp; + delta_nsec += ltemp; + } + xtime.tv_nsec += delta_nsec; + + /* + * The whole point of last_nsec_offset is that it can be updated atomically and + * lock-free. Thus, arches that don't have __HAVE_ARCH_CMPXCHG probably can't use + * last_nsec_offset anyhow... --davidm 2003-Feb-11 + */ +#ifdef __HAVE_ARCH_CMPXCHG + if (last_nsec_offset > 0) { + unsigned long new, old; + + do { + old = last_nsec_offset; + if (old > delta_nsec) + new = old - delta_nsec; + else + new = 0; + } while (cmpxchg(&last_nsec_offset, old, new) != old); } +#endif } /* @@ -762,7 +783,6 @@ * playing with xtime and avenrun. */ seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED; -unsigned long last_time_offset; /* * This function runs timers and the timer-tq in bottom half context. @@ -796,7 +816,6 @@ wall_jiffies += ticks; update_wall_time(ticks); } - last_time_offset = 0; calc_load(ticks); } diff -Nru a/mm/bootmem.c b/mm/bootmem.c --- a/mm/bootmem.c Wed Feb 12 14:47:57 2003 +++ b/mm/bootmem.c Wed Feb 12 14:47:57 2003 @@ -143,6 +143,7 @@ static void * __init __alloc_bootmem_core (bootmem_data_t *bdata, unsigned long size, unsigned long align, unsigned long goal) { + static unsigned long last_success; unsigned long i, start = 0; void *ret; unsigned long offset, remaining_size; @@ -168,6 +169,9 @@ if (goal && (goal >= bdata->node_boot_start) && ((goal >> PAGE_SHIFT) < bdata->node_low_pfn)) { preferred = goal - bdata->node_boot_start; + + if (last_success >= preferred) + preferred = last_success; } else preferred = 0; @@ -179,6 +183,8 @@ restart_scan: for (i = preferred; i < eidx; i += incr) { unsigned long j; + i = find_next_zero_bit((char *)bdata->node_bootmem_map, eidx, i); + i = (i + incr - 1) & -incr; if (test_bit(i, bdata->node_bootmem_map)) continue; for (j = i + 1; j < i + areasize; ++j) { @@ -197,6 +203,7 @@ } return NULL; found: + last_success = start << PAGE_SHIFT; if (start >= eidx) BUG(); @@ -256,21 +263,21 @@ map = bdata->node_bootmem_map; for (i = 0; i < idx; ) { unsigned long v = ~map[i / BITS_PER_LONG]; - if (v) { + if (v) { unsigned long m; - for (m = 1; m && i < idx; m<<=1, page++, i++) { + for (m = 1; m && i < idx; m<<=1, page++, i++) { if (v & m) { - count++; - ClearPageReserved(page); - set_page_count(page, 1); - __free_page(page); - } - } + count++; + ClearPageReserved(page); + set_page_count(page, 1); + __free_page(page); + } + } } else { i+=BITS_PER_LONG; - page+=BITS_PER_LONG; - } - } + page+=BITS_PER_LONG; + } + } total += count; /* diff -Nru a/mm/fadvise.c b/mm/fadvise.c --- a/mm/fadvise.c Wed Feb 12 14:47:57 2003 +++ b/mm/fadvise.c Wed Feb 12 14:47:57 2003 @@ -20,7 +20,7 @@ * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could * deactivate the pages and clear PG_Referenced. 
*/ -int sys_fadvise64(int fd, loff_t offset, size_t len, int advice) +long sys_fadvise64(int fd, loff_t offset, size_t len, int advice) { struct file *file = fget(fd); struct inode *inode; diff -Nru a/mm/memory.c b/mm/memory.c --- a/mm/memory.c Wed Feb 12 14:47:57 2003 +++ b/mm/memory.c Wed Feb 12 14:47:57 2003 @@ -113,8 +113,10 @@ } pmd = pmd_offset(dir, 0); pgd_clear(dir); - for (j = 0; j < PTRS_PER_PMD ; j++) + for (j = 0; j < PTRS_PER_PMD ; j++) { + prefetchw(pmd + j + PREFETCH_STRIDE/sizeof(*pmd)); free_one_pmd(tlb, pmd+j); + } pmd_free_tlb(tlb, pmd); } diff -Nru a/mm/page_alloc.c b/mm/page_alloc.c --- a/mm/page_alloc.c Wed Feb 12 14:47:57 2003 +++ b/mm/page_alloc.c Wed Feb 12 14:47:57 2003 @@ -1144,6 +1144,41 @@ memset(pgdat->valid_addr_bitmap, 0, size); } +static void __init memmap_init(struct page *start, unsigned long size, + int nid, unsigned long zone, unsigned long start_pfn) +{ + struct page *page; + + /* + * Initially all pages are reserved - free ones are freed + * up by free_all_bootmem() once the early boot process is + * done. Non-atomic initialization, single-pass. + */ + + for (page = start; page < (start + size); page++) { + set_page_zone(page, nid * MAX_NR_ZONES + zone); + set_page_count(page, 0); + SetPageReserved(page); + INIT_LIST_HEAD(&page->list); +#ifdef WANT_PAGE_VIRTUAL + if (zone != ZONE_HIGHMEM) + /* + * The shift left won't overflow because the + * ZONE_NORMAL is below 4G. + */ + set_page_address(page, __va(start_pfn << PAGE_SHIFT)); +#endif + start_pfn++; + } +} + +#ifdef HAVE_ARCH_MEMMAP_INIT +#define MEMMAP_INIT(start, size, nid, zone, start_pfn) \ + arch_memmap_init(memmap_init, start, size, nid, zone, start_pfn) +#else +#define MEMMAP_INIT(start, size, nid, zone, start_pfn) \ + memmap_init(start, size, nid, zone, start_pfn) +#endif /* * Set up the zone data structures: * - mark all pages reserved @@ -1255,28 +1290,8 @@ if ((zone_start_pfn) & (zone_required_alignment-1)) printk("BUG: wrong zone alignment, it will crash\n"); - /* - * Initially all pages are reserved - free ones are freed - * up by free_all_bootmem() once the early boot process is - * done. Non-atomic initialization, single-pass. - */ - for (i = 0; i < size; i++) { - struct page *page = lmem_map + local_offset + i; - set_page_zone(page, nid * MAX_NR_ZONES + j); - set_page_count(page, 0); - SetPageReserved(page); - INIT_LIST_HEAD(&page->list); -#ifdef WANT_PAGE_VIRTUAL - if (j != ZONE_HIGHMEM) - /* - * The shift left won't overflow because the - * ZONE_NORMAL is below 4G. 
- */ - set_page_address(page, - __va(zone_start_pfn << PAGE_SHIFT)); -#endif - zone_start_pfn++; - } + MEMMAP_INIT(lmem_map + local_offset,size,nid,j,zone_start_pfn); + zone_start_pfn += size; local_offset += size; for (i = 0; ; i++) { diff -Nru a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c --- a/net/bluetooth/af_bluetooth.c Wed Feb 12 14:47:57 2003 +++ b/net/bluetooth/af_bluetooth.c Wed Feb 12 14:47:57 2003 @@ -92,7 +92,7 @@ static int bt_sock_create(struct socket *sock, int proto) { - if (proto > BT_MAX_PROTO) + if (proto >= BT_MAX_PROTO) return -EINVAL; #if defined(CONFIG_KMOD) diff -Nru a/net/core/rtnetlink.c b/net/core/rtnetlink.c --- a/net/core/rtnetlink.c Wed Feb 12 14:47:58 2003 +++ b/net/core/rtnetlink.c Wed Feb 12 14:47:58 2003 @@ -351,7 +351,7 @@ return 0; family = ((struct rtgenmsg*)NLMSG_DATA(nlh))->rtgen_family; - if (family > NPROTO) { + if (family >= NPROTO) { *errp = -EAFNOSUPPORT; return -1; } diff -Nru a/scripts/kallsyms.c b/scripts/kallsyms.c --- a/scripts/kallsyms.c Wed Feb 12 14:47:58 2003 +++ b/scripts/kallsyms.c Wed Feb 12 14:47:58 2003 @@ -12,6 +12,15 @@ #include #include +#include + +#if CONFIG_ALPHA || CONFIG_IA64 || CONFIG_MIPS64 || CONFIG_PPC64 || CONFIG_S390X \ + || CONFIG_SPARC64 || CONFIG_X86_64 +# define ADDR_DIRECTIVE ".quad" +#else +# define ADDR_DIRECTIVE ".long" +#endif + struct sym_entry { unsigned long long addr; char type; diff -Nru a/sound/oss/cs4281/cs4281m.c b/sound/oss/cs4281/cs4281m.c --- a/sound/oss/cs4281/cs4281m.c Wed Feb 12 14:47:58 2003 +++ b/sound/oss/cs4281/cs4281m.c Wed Feb 12 14:47:58 2003 @@ -1946,8 +1946,8 @@ len -= x; } CS_DBGOUT(CS_WAVE_WRITE, 4, printk(KERN_INFO - "cs4281: clear_advance(): memset %d at 0x%.8x for %d size \n", - (unsigned)c, (unsigned)((char *) buf) + bptr, len)); + "cs4281: clear_advance(): memset %d at %p for %d size \n", + (unsigned)c, ((char *) buf) + bptr, len)); memset(((char *) buf) + bptr, c, len); } @@ -1982,9 +1982,8 @@ wake_up(&s->dma_adc.wait); } CS_DBGOUT(CS_PARMS, 8, printk(KERN_INFO - "cs4281: cs4281_update_ptr(): s=0x%.8x hwptr=%d total_bytes=%d count=%d \n", - (unsigned)s, s->dma_adc.hwptr, - s->dma_adc.total_bytes, s->dma_adc.count)); + "cs4281: cs4281_update_ptr(): s=%p hwptr=%d total_bytes=%d count=%d \n", + s, s->dma_adc.hwptr, s->dma_adc.total_bytes, s->dma_adc.count)); } // update DAC pointer // @@ -2016,11 +2015,10 @@ // Continue to play silence until the _release. // CS_DBGOUT(CS_WAVE_WRITE, 6, printk(KERN_INFO - "cs4281: cs4281_update_ptr(): memset %d at 0x%.8x for %d size \n", + "cs4281: cs4281_update_ptr(): memset %d at %p for %d size \n", (unsigned)(s->prop_dac.fmt & (AFMT_U8 | AFMT_U16_LE)) ? 0x80 : 0, - (unsigned)s->dma_dac.rawbuf, - s->dma_dac.dmasize)); + s->dma_dac.rawbuf, s->dma_dac.dmasize)); memset(s->dma_dac.rawbuf, (s->prop_dac. fmt & (AFMT_U8 | AFMT_U16_LE)) ? 
@@ -2051,9 +2049,8 @@ } } CS_DBGOUT(CS_PARMS, 8, printk(KERN_INFO - "cs4281: cs4281_update_ptr(): s=0x%.8x hwptr=%d total_bytes=%d count=%d \n", - (unsigned) s, s->dma_dac.hwptr, - s->dma_dac.total_bytes, s->dma_dac.count)); + "cs4281: cs4281_update_ptr(): s=%p hwptr=%d total_bytes=%d count=%d \n", + s, s->dma_dac.hwptr, s->dma_dac.total_bytes, s->dma_dac.count)); } } @@ -2184,8 +2181,7 @@ VALIDATE_STATE(s); CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO - "cs4281: mixer_ioctl(): s=0x%.8x cmd=0x%.8x\n", - (unsigned) s, cmd)); + "cs4281: mixer_ioctl(): s=%p cmd=0x%.8x\n", s, cmd)); #if CSDEBUG cs_printioctl(cmd); #endif @@ -2750,9 +2746,8 @@ CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO "cs4281: CopySamples()+ ")); CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO - " dst=0x%x src=0x%x count=%d iChannels=%d fmt=0x%x\n", - (unsigned) dst, (unsigned) src, (unsigned) count, - (unsigned) iChannels, (unsigned) fmt)); + " dst=%p src=%p count=%d iChannels=%d fmt=0x%x\n", + dst, src, (unsigned) count, (unsigned) iChannels, (unsigned) fmt)); // Gershwin does format conversion in hardware so normally // we don't do any host based coversion. The data formatter @@ -2832,9 +2827,9 @@ void *src = hwsrc; //default to the standard destination buffer addr CS_DBGOUT(CS_FUNCTION, 6, printk(KERN_INFO - "cs_copy_to_user()+ fmt=0x%x fmt_o=0x%x cnt=%d dest=0x%.8x\n", + "cs_copy_to_user()+ fmt=0x%x fmt_o=0x%x cnt=%d dest=%p\n", s->prop_adc.fmt, s->prop_adc.fmt_original, - (unsigned) cnt, (unsigned) dest)); + (unsigned) cnt, dest)); if (cnt > s->dma_adc.dmasize) { cnt = s->dma_adc.dmasize; @@ -2879,7 +2874,7 @@ unsigned copied = 0; CS_DBGOUT(CS_FUNCTION | CS_WAVE_READ, 2, - printk(KERN_INFO "cs4281: cs4281_read()+ %d \n", count)); + printk(KERN_INFO "cs4281: cs4281_read()+ %Zu \n", count)); VALIDATE_STATE(s); if (ppos != &file->f_pos) @@ -2902,7 +2897,7 @@ // while (count > 0) { CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO - "_read() count>0 count=%d .count=%d .swptr=%d .hwptr=%d \n", + "_read() count>0 count=%Zu .count=%d .swptr=%d .hwptr=%d \n", count, s->dma_adc.count, s->dma_adc.swptr, s->dma_adc.hwptr)); spin_lock_irqsave(&s->lock, flags); @@ -2959,11 +2954,10 @@ // the "cnt" is the number of bytes to read. 
CS_DBGOUT(CS_WAVE_READ, 2, printk(KERN_INFO - "_read() copy_to cnt=%d count=%d ", cnt, count)); + "_read() copy_to cnt=%d count=%Zu ", cnt, count)); CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO - " .dmasize=%d .count=%d buffer=0x%.8x ret=%d\n", - s->dma_adc.dmasize, s->dma_adc.count, - (unsigned) buffer, ret)); + " .dmasize=%d .count=%d buffer=%p ret=%Zd\n", + s->dma_adc.dmasize, s->dma_adc.count, buffer, ret)); if (cs_copy_to_user (s, buffer, s->dma_adc.rawbuf + swptr, cnt, &copied)) @@ -2979,7 +2973,7 @@ start_adc(s); } CS_DBGOUT(CS_FUNCTION | CS_WAVE_READ, 2, - printk(KERN_INFO "cs4281: cs4281_read()- %d\n", ret)); + printk(KERN_INFO "cs4281: cs4281_read()- %Zd\n", ret)); return ret; } @@ -2995,7 +2989,7 @@ int cnt; CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE, 2, - printk(KERN_INFO "cs4281: cs4281_write()+ count=%d\n", + printk(KERN_INFO "cs4281: cs4281_write()+ count=%Zu\n", count)); VALIDATE_STATE(s); @@ -3051,7 +3045,7 @@ start_dac(s); } CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE, 2, - printk(KERN_INFO "cs4281: cs4281_write()- %d\n", ret)); + printk(KERN_INFO "cs4281: cs4281_write()- %Zd\n", ret)); return ret; } @@ -3172,8 +3166,7 @@ int val, mapped, ret; CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO - "cs4281: cs4281_ioctl(): file=0x%.8x cmd=0x%.8x\n", - (unsigned) file, cmd)); + "cs4281: cs4281_ioctl(): file=%p cmd=0x%.8x\n", file, cmd)); #if CSDEBUG cs_printioctl(cmd); #endif @@ -3603,8 +3596,8 @@ (struct cs4281_state *) file->private_data; CS_DBGOUT(CS_FUNCTION | CS_RELEASE, 2, printk(KERN_INFO - "cs4281: cs4281_release(): inode=0x%.8x file=0x%.8x f_mode=%d\n", - (unsigned) inode, (unsigned) file, file->f_mode)); + "cs4281: cs4281_release(): inode=%p file=%p f_mode=%d\n", + inode, file, file->f_mode)); VALIDATE_STATE(s); @@ -3638,8 +3631,8 @@ struct list_head *entry; CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2, printk(KERN_INFO - "cs4281: cs4281_open(): inode=0x%.8x file=0x%.8x f_mode=0x%x\n", - (unsigned) inode, (unsigned) file, file->f_mode)); + "cs4281: cs4281_open(): inode=%p file=%p f_mode=0x%x\n", + inode, file, file->f_mode)); list_for_each(entry, &cs4281_devs) { @@ -4348,10 +4341,8 @@ CS_DBGOUT(CS_INIT, 2, printk(KERN_INFO - "cs4281: probe() BA0=0x%.8x BA1=0x%.8x pBA0=0x%.8x pBA1=0x%.8x \n", - (unsigned) temp1, (unsigned) temp2, - (unsigned) s->pBA0, (unsigned) s->pBA1)); - + "cs4281: probe() BA0=0x%.8x BA1=0x%.8x pBA0=%p pBA1=%p \n", + (unsigned) temp1, (unsigned) temp2, s->pBA0, s->pBA1)); CS_DBGOUT(CS_INIT, 2, printk(KERN_INFO "cs4281: probe() pBA0phys=0x%.8x pBA1phys=0x%.8x\n", @@ -4398,15 +4389,13 @@ if (pmdev) { CS_DBGOUT(CS_INIT | CS_PM, 4, printk(KERN_INFO - "cs4281: probe() pm_register() succeeded (0x%x).\n", - (unsigned)pmdev)); + "cs4281: probe() pm_register() succeeded (%p).\n", pmdev)); pmdev->data = s; } else { CS_DBGOUT(CS_INIT | CS_PM | CS_ERROR, 0, printk(KERN_INFO - "cs4281: probe() pm_register() failed (0x%x).\n", - (unsigned)pmdev)); + "cs4281: probe() pm_register() failed (%p).\n", pmdev)); s->pm.flags |= CS4281_PM_NOT_REGISTERED; } #endif diff -Nru a/sound/oss/cs4281/cs4281pm-24.c b/sound/oss/cs4281/cs4281pm-24.c --- a/sound/oss/cs4281/cs4281pm-24.c Wed Feb 12 14:47:58 2003 +++ b/sound/oss/cs4281/cs4281pm-24.c Wed Feb 12 14:47:58 2003 @@ -46,8 +46,8 @@ struct cs4281_state *state; CS_DBGOUT(CS_PM, 2, printk(KERN_INFO - "cs4281: cs4281_pm_callback dev=0x%x rqst=0x%x state=%d\n", - (unsigned)dev,(unsigned)rqst,(unsigned)data)); + "cs4281: cs4281_pm_callback dev=%p rqst=0x%x state=%p\n", + dev,(unsigned)rqst,data)); state = (struct cs4281_state *) dev->data; if (state) { 
switch(rqst) { diff -Nru a/usr/Makefile b/usr/Makefile --- a/usr/Makefile Wed Feb 12 14:47:58 2003 +++ b/usr/Makefile Wed Feb 12 14:47:58 2003 @@ -5,12 +5,9 @@ clean-files := initramfs_data.cpio.gz -LDFLAGS_initramfs_data.o := $(LDFLAGS_BLOB) -r -T - -$(obj)/initramfs_data.o: $(src)/initramfs_data.scr $(obj)/initramfs_data.cpio.gz FORCE - $(call if_changed,ld) - $(obj)/initramfs_data.cpio.gz: $(obj)/gen_init_cpio ./$< | gzip -9c > $@ - +$(obj)/initramfs_data.S: $(obj)/initramfs_data.cpio.gz + echo '.section ".init.ramfs", "a"' > $@ + od -v -An -t x1 -w8 $^ | cut -c2- | sed -e s"/ /,0x/g" -e s"/^/.byte 0x"/ >> $@
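
As an aside, the kernel/timer.c hunk above decays last_nsec_offset with a cmpxchg() retry loop so that the offset can be updated atomically and lock-free, as the comment in that hunk notes. The following is a minimal user-space sketch of the same pattern, not part of the patch: it uses the GCC builtin __sync_val_compare_and_swap() as a stand-in for the kernel's cmpxchg(), and the variable names and initial value are illustrative only.

/*
 * User-space sketch of the lock-free decrement pattern used for
 * last_nsec_offset in the kernel/timer.c hunk above.  The GCC builtin
 * __sync_val_compare_and_swap() stands in for the kernel's cmpxchg();
 * names and the initial value are assumptions for illustration.
 */
#include <stdio.h>

static unsigned long last_nsec_offset = 1000000;	/* assumed starting offset */

static void consume_offset(unsigned long delta_nsec)
{
	unsigned long old, new;

	do {
		old = last_nsec_offset;
		new = (old > delta_nsec) ? old - delta_nsec : 0;
		/* Retry if another thread changed the value since we read it. */
	} while (__sync_val_compare_and_swap(&last_nsec_offset, old, new) != old);
}

int main(void)
{
	consume_offset(300000);
	printf("last_nsec_offset is now %lu\n", last_nsec_offset);
	return 0;
}

The loop only commits its new value when the compare-and-swap observes exactly the value it read, which is what lets the offset be reduced concurrently without taking a lock such as xtime_lock.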