Diffstat (limited to 'arch/m32r/kernel')
-rw-r--r--  arch/m32r/kernel/init_task.c |  5
-rw-r--r--  arch/m32r/kernel/ptrace.c    |  5
-rw-r--r--  arch/m32r/kernel/smp.c       | 30
-rw-r--r--  arch/m32r/kernel/smpboot.c   |  4
-rw-r--r--  arch/m32r/kernel/time.c      | 74
5 files changed, 24 insertions(+), 94 deletions(-)
diff --git a/arch/m32r/kernel/init_task.c b/arch/m32r/kernel/init_task.c
index fce57e5d3f9..6c42d5f8df5 100644
--- a/arch/m32r/kernel/init_task.c
+++ b/arch/m32r/kernel/init_task.c
@@ -20,9 +20,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
  * way process stacks are handled. This is done by having a special
  * "init_task" linker map entry..
  */
-union thread_union init_thread_union
-	__attribute__((__section__(".data.init_task"))) =
-		{ INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+	{ INIT_THREAD_INFO(init_task) };
 
 /*
  * Initial task structure.
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 98b8feb12ed..98682bba0ed 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -77,7 +77,7 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
 	struct user * dummy = NULL;
 #endif
 
-	if ((off & 3) || (off < 0) || (off > sizeof(struct user) - 3))
+	if ((off & 3) || off > sizeof(struct user) - 3)
 		return -EIO;
 
 	off >>= 2;
@@ -139,8 +139,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
 	struct user * dummy = NULL;
 #endif
 
-	if ((off & 3) || off < 0 ||
-	    off > sizeof(struct user) - 3)
+	if ((off & 3) || off > sizeof(struct user) - 3)
 		return -EIO;
 
 	off >>= 2;
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 929e5c9d3ad..1b7598e6f6e 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -85,7 +85,7 @@ void smp_ipi_timer_interrupt(struct pt_regs *);
 void smp_local_timer_interrupt(void);
 
 static void send_IPI_allbutself(int, int);
-static void send_IPI_mask(cpumask_t, int, int);
+static void send_IPI_mask(const struct cpumask *, int, int);
 unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -113,7 +113,7 @@ unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 void smp_send_reschedule(int cpu_id)
 {
 	WARN_ON(cpu_is_offline(cpu_id));
-	send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
+	send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
 }
 
 /*==========================================================================*
@@ -168,7 +168,7 @@ void smp_flush_cache_all(void)
 	spin_lock(&flushcache_lock);
 	mask=cpus_addr(cpumask);
 	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
-	send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
 		mb();
@@ -264,7 +264,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 	if (*mmc != NO_CONTEXT) {
@@ -273,7 +273,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 		if (mm == current->mm)
 			activate_context(mm);
 		else
-			cpu_clear(cpu_id, mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
 		local_irq_restore(flags);
 	}
 	if (!cpus_empty(cpu_mask))
@@ -334,7 +334,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 #ifdef DEBUG_SMP
@@ -424,7 +424,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
 
 	while (!cpus_empty(flush_cpumask)) {
 		/* nothing. lockup detection does not belong here */
@@ -469,7 +469,7 @@ void smp_invalidate_interrupt(void)
 		if (flush_mm == current->active_mm)
 			activate_context(flush_mm);
 		else
-			cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
 	} else {
 		unsigned long va = flush_va;
 
@@ -546,14 +546,14 @@ static void stop_this_cpu(void *dummy)
 	for ( ; ; );
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
+	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
 }
 
 /*==========================================================================*
@@ -729,7 +729,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
 	cpumask = cpu_online_map;
 	cpu_clear(smp_processor_id(), cpumask);
 
-	send_IPI_mask(cpumask, ipi_num, try);
+	send_IPI_mask(&cpumask, ipi_num, try);
 }
 
 /*==========================================================================*
@@ -752,7 +752,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
  * ---------- --- --------------------------------------------------------
  *
  *==========================================================================*/
-static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
+static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
 {
 	cpumask_t physid_mask, tmp;
 	int cpu_id, phys_id;
@@ -761,11 +761,11 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
 	if (num_cpus <= 1)	/* NO MP */
 		return;
 
-	cpus_and(tmp, cpumask, cpu_online_map);
-	BUG_ON(!cpus_equal(cpumask, tmp));
+	cpumask_and(&tmp, cpumask, cpu_online_mask);
+	BUG_ON(!cpumask_equal(cpumask, &tmp));
 
 	physid_mask = CPU_MASK_NONE;
-	for_each_cpu_mask(cpu_id, cpumask){
+	for_each_cpu(cpu_id, cpumask) {
 		if ((phys_id = cpu_to_physid(cpu_id)) != -1)
 			cpu_set(phys_id, physid_mask);
 	}
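The smp.c changes above track the kernel-wide cpumask API conversion: IPI helpers now take a const struct cpumask * instead of a cpumask_t by value, so no NR_CPUS-sized bitmap is copied onto the stack, and mm->cpu_vm_mask is reached through the mm_cpumask() accessor. A minimal sketch of the new calling convention using only generic cpumask helpers; my_send_ipi() and its callers are hypothetical, not part of this patch:

#include <linux/cpumask.h>

/* Hypothetical IPI sender in the post-conversion style: the mask is
 * passed by const pointer rather than copied by value. */
static void my_send_ipi(const struct cpumask *mask, int ipi_num)
{
	int cpu;

	/* for_each_cpu() replaces the old for_each_cpu_mask() and walks
	 * the set bits of the mask through the pointer. */
	for_each_cpu(cpu, mask) {
		/* platform-specific IPI delivery for 'cpu' would go here */
		(void)ipi_num;
	}
}

static void my_example_callers(void)
{
	/* one CPU: cpumask_of() already yields a const pointer, no copy */
	my_send_ipi(cpumask_of(0), 0);

	/* all online CPUs: cpu_online_mask replaces cpu_online_map */
	my_send_ipi(cpu_online_mask, 0);
}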
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 2547d6c4a82..e034844cfc0 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
 		physid_set(phys_id, phys_cpu_present_map);
 #ifndef CONFIG_HOTPLUG_CPU
-	cpu_present_map = cpu_possible_map;
+	init_cpu_present(&cpu_possible_map);
 #endif
 
 	show_mp_info(nr_cpu);
@@ -213,7 +213,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		if (!physid_isset(phys_id, phys_cpu_present_map))
 			continue;
 
-		if ((max_cpus >= 0) && (max_cpus <= cpucount + 1))
+		if (max_cpus <= cpucount + 1)
 			continue;
 
 		do_boot_cpu(phys_id);
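The smpboot.c hunks are two cleanups: the present map is seeded through the init_cpu_present() accessor rather than by assigning cpu_present_map directly, and the (max_cpus >= 0) clause is dropped because max_cpus is an unsigned int, so that comparison is always true. A minimal sketch of the accessor style; example_mark_present() is a hypothetical helper, not from this patch:

#include <linux/cpumask.h>
#include <linux/init.h>

/* Hypothetical boot-time helper using the cpumask accessors rather than
 * writing to the global maps directly. */
static void __init example_mark_present(unsigned int nr_cpus)
{
	unsigned int cpu;

	/* wholesale copy, as the patched smp_prepare_cpus() does ... */
	init_cpu_present(cpu_possible_mask);

	/* ... or, equivalently, per-CPU via set_cpu_present() */
	for (cpu = 0; cpu < nr_cpus; cpu++)
		set_cpu_present(cpu, true);
}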
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index cada3ba4b99..ba61c4c7320 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -48,7 +48,7 @@ extern void smp_local_timer_interrupt(void);
 
 static unsigned long latch;
 
-static unsigned long do_gettimeoffset(void)
+u32 arch_gettimeoffset(void)
 {
 	unsigned long  elapsed_time = 0;	/* [us] */
 
@@ -93,79 +93,10 @@ static unsigned long do_gettimeoffset(void)
 #error no chip configuration
 #endif
 
-	return elapsed_time;
+	return elapsed_time * 1000;
 }
 
 /*
- * This version of gettimeofday has near microsecond resolution.
- */
-void do_gettimeofday(struct timeval *tv)
-{
-	unsigned long seq;
-	unsigned long usec, sec;
-	unsigned long max_ntp_tick = tick_usec - tickadj;
-
-	do {
-		seq = read_seqbegin(&xtime_lock);
-
-		usec = do_gettimeoffset();
-
-		/*
-		 * If time_adjust is negative then NTP is slowing the clock
-		 * so make sure not to go into next possible interval.
-		 * Better to lose some accuracy than have time go backwards..
-		 */
-		if (unlikely(time_adjust < 0))
-			usec = min(usec, max_ntp_tick);
-
-		sec = xtime.tv_sec;
-		usec += (xtime.tv_nsec / 1000);
-	} while (read_seqretry(&xtime_lock, seq));
-
-	while (usec >= 1000000) {
-		usec -= 1000000;
-		sec++;
-	}
-
-	tv->tv_sec = sec;
-	tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;
-
-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-		return -EINVAL;
-
-	write_seqlock_irq(&xtime_lock);
-	/*
-	 * This is revolting. We need to set "xtime" correctly. However, the
-	 * value in this location is the value at the most recent update of
-	 * wall time. Discover what correction gettimeofday() would have
-	 * made, and then undo it!
-	 */
-	nsec -= do_gettimeoffset() * NSEC_PER_USEC;
-
-	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
-	set_normalized_timespec(&xtime, sec, nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
-	ntp_clear();
-	write_sequnlock_irq(&xtime_lock);
-	clock_was_set();
-
-	return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-
-/*
  * In order to set the CMOS clock precisely, set_rtc_mmss has to be
  * called 500 ms after the second nowtime has started, because when
  * nowtime is written into the registers of the CMOS clock, it will
@@ -192,6 +123,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 #ifndef CONFIG_SMP
 	profile_tick(CPU_PROFILING);
 #endif
+	/* XXX FIXME. Uh, the xtime_lock should be held here, no? */
 	do_timer(1);
 
 #ifndef CONFIG_SMP
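The time.c change converts m32r to the generic arch_gettimeoffset() mechanism: the architecture keeps only a routine reporting how many nanoseconds have elapsed since the last tick (hence the * 1000 converting microseconds to nanoseconds), while the generic timekeeping code supplies do_gettimeofday() and do_settimeofday(), so the arch-private copies are deleted. A sketch of the resulting shape under CONFIG_ARCH_USES_GETTIMEOFFSET, assuming a hypothetical down-counting tick timer; example_latch and example_read_counter() are placeholders, not m32r registers:

#include <linux/types.h>

#define EXAMPLE_USEC_PER_TICK	10000	/* 100 Hz tick, for illustration */

static u32 example_latch = 12500;	/* counter reload value per tick */

static u32 example_read_counter(void)
{
	return 0;	/* would read the hardware down-counter here */
}

/*
 * Called by the generic timekeeping code to interpolate between timer
 * interrupts; it must return nanoseconds since the last tick.
 */
u32 arch_gettimeoffset(void)
{
	u32 elapsed_us;

	/* counts consumed so far this tick, scaled to microseconds */
	elapsed_us = (example_latch - example_read_counter())
			* EXAMPLE_USEC_PER_TICK / example_latch;

	/* the generic code expects ns, hence the '* 1000' in the patch */
	return elapsed_us * 1000;
}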