Diffstat (limited to 'arch/cris')
-rw-r--r--  arch/cris/include/arch-v32/arch/spinlock.h  62
-rw-r--r--  arch/cris/include/asm/elf.h                   2
-rw-r--r--  arch/cris/kernel/irq.c                        4
3 files changed, 33 insertions(+), 35 deletions(-)
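
The spinlock.h hunks below are a mechanical rename of the CRIS lock primitives from the __raw_spin_*/raw_spinlock_t and __raw_read_*/__raw_write_*/raw_rwlock_t names to arch_spin_*, arch_read_*/arch_write_* and arch_spinlock_t/arch_rwlock_t; the locking semantics themselves are unchanged. As a rough, illustrative sketch of the contract behind these renamed hooks (not the commit's code: the demo_ identifiers and the C11-atomics body are hypothetical stand-ins, whereas the real CRIS functions defer to the cris_spin_lock()/cris_spin_trylock() assembly helpers shown in the diff):

    /*
     * Illustrative sketch only, not part of this commit.  The demo_ names are
     * hypothetical; the real CRIS implementation below is backed by the
     * cris_spin_lock()/cris_spin_trylock() assembly helpers.  Convention
     * mirrored from arch_spin_is_locked(): slock > 0 is unlocked, <= 0 locked.
     */
    #include <stdatomic.h>

    typedef struct { atomic_char slock; } demo_arch_spinlock_t;

    static inline int demo_arch_spin_is_locked(demo_arch_spinlock_t *x)
    {
        return atomic_load(&x->slock) <= 0;
    }

    static inline int demo_arch_spin_trylock(demo_arch_spinlock_t *lock)
    {
        /* Atomically claim the lock; succeed only if it was unlocked (> 0). */
        return atomic_exchange(&lock->slock, 0) > 0;
    }

    static inline void demo_arch_spin_lock(demo_arch_spinlock_t *lock)
    {
        while (!demo_arch_spin_trylock(lock))
            ;   /* busy-wait; kernel code would call cpu_relax() here */
    }

    static inline void demo_arch_spin_unlock(demo_arch_spinlock_t *lock)
    {
        atomic_store(&lock->slock, 1);  /* positive value marks it unlocked */
    }

A caller would declare demo_arch_spinlock_t lock = { 1 }; (positive meaning unlocked, matching arch_spin_is_locked() in the hunk below) and bracket its critical section with demo_arch_spin_lock()/demo_arch_spin_unlock(), which is how the generic spinlock layer drives the arch_spin_* entry points after this rename.
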
diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h
index 367a53ea10c..f171a6600fb 100644
--- a/arch/cris/include/arch-v32/arch/spinlock.h
+++ b/arch/cris/include/arch-v32/arch/spinlock.h
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val);
 extern void cris_spin_lock(void *l);
 extern int cris_spin_trylock(void *l);
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
 	return *(volatile signed char *)(&(x)->slock) <= 0;
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ volatile ("move.d %1,%0" \
 			  : "=m" (lock->slock) \
@@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 			  : "memory");
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
+	while (arch_spin_is_locked(lock))
 		cpu_relax();
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return cris_spin_trylock((void *)&lock->slock);
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	cris_spin_lock((void *)&lock->slock);
 }
 
 static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
 /*
@@ -56,76 +56,76 @@ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
  *
  */
 
-static inline int __raw_read_can_lock(raw_rwlock_t *x)
+static inline int arch_read_can_lock(arch_rwlock_t *x)
 {
 	return (int)(x)->lock > 0;
 }
 
-static inline int __raw_write_can_lock(raw_rwlock_t *x)
+static inline int arch_write_can_lock(arch_rwlock_t *x)
 {
 	return (x)->lock == RW_LOCK_BIAS;
 }
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	while (rw->lock == 0);
 	rw->lock--;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	while (rw->lock != RW_LOCK_BIAS);
 	rw->lock = 0;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	rw->lock++;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	while (rw->lock != RW_LOCK_BIAS);
 	rw->lock = RW_LOCK_BIAS;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	int ret = 0;
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	if (rw->lock != 0) {
 		rw->lock--;
 		ret = 1;
 	}
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 	return ret;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	int ret = 0;
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	if (rw->lock == RW_LOCK_BIAS) {
 		rw->lock = 0;
 		ret = 1;
 	}
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 	return 1;
 }
 
 #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
 #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
 
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
 
 #endif /* __ASM_ARCH_SPINLOCK_H */
diff --git a/arch/cris/include/asm/elf.h b/arch/cris/include/asm/elf.h
index 0f51b10b9f4..8a3d8e2b33c 100644
--- a/arch/cris/include/asm/elf.h
+++ b/arch/cris/include/asm/elf.h
@@ -64,8 +64,6 @@ typedef unsigned long elf_fpregset_t;
 #define EF_CRIS_VARIANT_COMMON_V10_V32 0x00000004
 /* End of excerpt from {binutils}/include/elf/cris.h. */
 
-#define USE_ELF_CORE_DUMP
-
 #define ELF_EXEC_PAGESIZE 8192
 
 /* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index 0ca7d9892cc..b5ce0724a88 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -52,7 +52,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	}
 	return 0;
 }