Diffstat (limited to 'include')
 include/asm-generic/pgtable.h     |  8 ++++++++
 include/asm-h8300/bitops.h        |  2 ++
 include/asm-i386/pgtable-3level.h |  1 +
 include/asm-m32r/unistd.h         | 15 +++------------
 include/asm-um/dma-mapping.h      |  2 ++
 include/asm-um/smp.h              |  6 ------
 include/linux/acct.h              | 23 +++++++++++++++--------
 include/linux/highmem.h           |  4 ++++
 include/linux/sched.h             |  2 +-
 include/linux/times.h             | 20 ++++++++++++++++++++
 10 files changed, 56 insertions(+), 27 deletions(-)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 29573197c3ee..cf791b073e76 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -13,11 +13,19 @@
* Note: the old pte is known to not be writable, so we don't need to
* worry about dirty bits etc getting lost.
*/
+#ifndef __HAVE_ARCH_SET_PTE_ATOMIC
#define ptep_establish(__vma, __address, __ptep, __entry) \
do { \
	set_pte(__ptep, __entry); \
	flush_tlb_page(__vma, __address); \
} while (0)
+#else /* __HAVE_ARCH_SET_PTE_ATOMIC */
+#define ptep_establish(__vma, __address, __ptep, __entry) \
+do { \
+	set_pte_atomic(__ptep, __entry); \
+	flush_tlb_page(__vma, __address); \
+} while (0)
+#endif /* __HAVE_ARCH_SET_PTE_ATOMIC */
#endif
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
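[Editor's note: the hunk above selects between two implementations at preprocessor time -- plain set_pte() where a PTE store is naturally atomic, set_pte_atomic() where it is not. A minimal user-space sketch of the race the atomic variant closes; fake_pte_t, write_torn() and write_store_atomic() are hypothetical names for illustration, not kernel API:]

#include <stdint.h>

typedef union {
	uint64_t val;
	struct { uint32_t low, high; } half;
} fake_pte_t;

/* Two 32-bit stores: a page-table walker on another CPU can observe
 * new.low combined with old.high -- a PTE that never existed. */
static void write_torn(volatile fake_pte_t *pte, uint64_t v)
{
	pte->half.low  = (uint32_t)v;
	pte->half.high = (uint32_t)(v >> 32);
}

/* One 64-bit atomic store: the entry flips in a single step, which is
 * the guarantee set_pte_atomic() provides when the architecture
 * defines __HAVE_ARCH_SET_PTE_ATOMIC. */
static void write_store_atomic(volatile fake_pte_t *pte, uint64_t v)
{
	__atomic_store_n(&pte->val, v, __ATOMIC_SEQ_CST);
}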
diff --git a/include/asm-h8300/bitops.h b/include/asm-h8300/bitops.h
index 893e6f1c7efd..94b7a46a6043 100644
--- a/include/asm-h8300/bitops.h
+++ b/include/asm-h8300/bitops.h
@@ -273,6 +273,8 @@ found_middle:
return result + __ffs(tmp);
}
+#define find_first_bit(addr, size) find_next_bit(addr, size, 0)
+
/*
* Every architecture must define this function. It's the fastest
* way of searching a 140-bit bitmap where the first 100 bits are
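[Editor's note: the new find_first_bit() is simply find_next_bit() started at offset 0: scan from bit 0, return the index of the first set bit, or `size` if none is set. A host-side sketch of that contract; find_next_bit_generic() is a stand-in for the h8300 implementation, illustration only:]

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long find_next_bit_generic(const unsigned long *addr,
					   unsigned long size,
					   unsigned long offset)
{
	unsigned long i;

	for (i = offset; i < size; i++)
		if (addr[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			return i;
	return size;	/* no set bit found */
}

#define find_first_bit(addr, size) find_next_bit_generic(addr, size, 0)

int main(void)
{
	unsigned long map[2] = { 0, 1UL << 4 };

	/* first set bit lives at BITS_PER_LONG + 4: 36 on 32-bit h8300 */
	printf("%lu\n", find_first_bit(map, 2 * BITS_PER_LONG));
	return 0;
}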
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index e48ab3d07c83..80a24b01d47e 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -54,6 +54,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
smp_wmb();
ptep->pte_low = pte.pte_low;
}
+#define __HAVE_ARCH_SET_PTE_ATOMIC
#define set_pte_atomic(pteptr,pteval) \
set_64bit((unsigned long long *)(pteptr),pte_val(pteval))
#define set_pmd(pmdptr,pmdval) \
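[Editor's note: on i386 PAE, set_64bit() is what makes set_pte_atomic() safe -- the whole 8-byte entry is replaced with cmpxchg8b in one bus transaction. A rough user-space equivalent of that semantics using a GCC builtin; set_64bit_sketch() is an illustration, not the kernel's implementation:]

#include <stdint.h>

static void set_64bit_sketch(uint64_t *p, uint64_t val)
{
	uint64_t old = *p;

	/* loop until the 8-byte compare-and-swap succeeds; on i386 the
	 * builtin compiles to cmpxchg8b, so readers only ever see the
	 * old value or the new one, never a mix of halves */
	while (!__sync_bool_compare_and_swap(p, old, val))
		old = *p;
}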
diff --git a/include/asm-m32r/unistd.h b/include/asm-m32r/unistd.h
index 31aca939104f..a506573b7b69 100644
--- a/include/asm-m32r/unistd.h
+++ b/include/asm-m32r/unistd.h
@@ -294,25 +294,16 @@
#define __NR_mq_getsetattr (__NR_mq_open+5)
#define __NR_sys_kexec_load 283
#define __NR_waitid 284
-#define __NR_perfctr_info 285
-#define __NR_vperfctr_open (__NR_perfctr_info+1)
-#define __NR_vperfctr_control (__NR_perfctr_info+2)
-#define __NR_vperfctr_unlink (__NR_perfctr_info+3)
-#define __NR_vperfctr_iresume (__NR_perfctr_info+4)
-#define __NR_vperfctr_read (__NR_perfctr_info+5)
-#define __NR_add_key 291
-#define __NR_request_key 292
-#define __NR_keyctl 293
-#define NR_syscalls 294
+#define NR_syscalls 285
-/* user-visible error numbers are in the range -1 - -128: see
+/* user-visible error numbers are in the range -1 - -124: see
* <asm-m32r/errno.h>
*/
#define __syscall_return(type, res) \
do { \
-	if ((unsigned long)(res) >= (unsigned long)(-(128 + 1))) { \
+	if ((unsigned long)(res) >= (unsigned long)(-(124 + 1))) { \
		/* Avoid using "res" which is declared to be in register r0; \
		   errno might expand to a function call and clobber it. */ \
		int __err = -(res); \
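[Editor's note: the hunk drops the perfctr and key-management syscall numbers, pulls NR_syscalls back to 285, and shrinks the error window to match the largest errno (124) in <asm-m32r/errno.h>. A user-space sketch of the convention __syscall_return() implements; syscall_return_sketch() is illustrative, not kernel API:]

#include <errno.h>

static long syscall_return_sketch(long res)
{
	/* small negative raw results encode a negated errno */
	if ((unsigned long)res >= (unsigned long)(-(124 + 1))) {
		errno = -res;
		return -1;
	}
	return res;	/* a real return value, e.g. a byte count */
}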
diff --git a/include/asm-um/dma-mapping.h b/include/asm-um/dma-mapping.h
index 2ea88281161d..13e6291f7151 100644
--- a/include/asm-um/dma-mapping.h
+++ b/include/asm-um/dma-mapping.h
@@ -1,6 +1,8 @@
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
+#include <asm/scatterlist.h>
+
static inline int
dma_supported(struct device *dev, u64 mask)
{
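[Editor's note: the added include is needed because later declarations in this header take struct scatterlist arguments; without the type in scope, each prototype would introduce its own incompatible local struct. A self-contained stand-in -- these struct fields are simplified, not the real <asm/scatterlist.h>:]

/* simplified stand-in for <asm/scatterlist.h> */
struct scatterlist {
	unsigned long page_link;
	unsigned int  offset;
	unsigned int  length;
};

/* with the definition visible, inlines like this one compile cleanly */
static inline unsigned int sg_length(struct scatterlist *sg)
{
	return sg->length;
}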
diff --git a/include/asm-um/smp.h b/include/asm-um/smp.h
index ee768c27ca0e..4412d5d9c26b 100644
--- a/include/asm-um/smp.h
+++ b/include/asm-um/smp.h
@@ -8,10 +8,6 @@
#include "asm/current.h"
#include "linux/cpumask.h"
-extern cpumask_t cpu_online_map;
-extern cpumask_t cpu_possible_map;
-
-
#define smp_processor_id() (current_thread->cpu)
#define cpu_logical_map(n) (n)
#define cpu_number_map(n) (n)
@@ -19,8 +15,6 @@ extern cpumask_t cpu_possible_map;
extern int hard_smp_processor_id(void);
#define NO_PROC_ID -1
-#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
-
extern int ncpus;
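[Editor's note: the deleted externs and the local cpu_online() duplicated what "linux/cpumask.h" (included above) already provides; keeping a private copy risks drifting from the shared one. Simplified shape of the generic definitions this header now relies on -- a sketch, not the real header:]

typedef struct { unsigned long bits[1]; } cpumask_t;
extern cpumask_t cpu_online_map;

#define cpu_isset(cpu, mask)	(((mask).bits[0] >> (cpu)) & 1)
#define cpu_online(cpu)		cpu_isset(cpu, cpu_online_map)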
diff --git a/include/linux/acct.h b/include/linux/acct.h
index b46ce1ac1c6a..a6ab17c49aa1 100644
--- a/include/linux/acct.h
+++ b/include/linux/acct.h
@@ -172,17 +172,24 @@ static inline u32 jiffies_to_AHZ(unsigned long x)
#endif
}
-static inline u64 jiffies_64_to_AHZ(u64 x)
+static inline u64 nsec_to_AHZ(u64 x)
{
-#if (TICK_NSEC % (NSEC_PER_SEC / AHZ)) == 0
-#if HZ != AHZ
-	do_div(x, HZ / AHZ);
-#endif
-#else
-	x *= TICK_NSEC;
+#if (NSEC_PER_SEC % AHZ) == 0
	do_div(x, (NSEC_PER_SEC / AHZ));
+#elif (AHZ % 512) == 0
+	x *= AHZ/512;
+	do_div(x, (NSEC_PER_SEC / 512));
+#else
+	/*
+	 * max relative error 5.7e-8 (1.8s per year) for AHZ <= 1024,
+	 * overflow after 64.99 years.
+	 * exact for AHZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
+	 */
+	x *= 9;
+	do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (AHZ/2))
+				  / AHZ));
#endif
- return x;
+ return x;
}
#endif /* __KERNEL */
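[Editor's note: a user-space check of the #else branch above, with do_div() stubbed -- not kernel code. For AHZ=60 the rounded divisor is (9*10^9 + 30)/60 = 150000000, and since 150000000 * 60 == 9 * 10^9 exactly, x*9/150000000 equals x*60/10^9 with no error at all, which is why 60 appears in the "exact for" list:]

#include <stdio.h>
#include <stdint.h>

#define AHZ		60
#define NSEC_PER_SEC	1000000000ull

/* stand-in for the kernel's do_div(): divide n in place */
#define do_div(n, base)	((n) = (n) / (base))

static uint64_t nsec_to_AHZ(uint64_t x)
{
	x *= 9;
	do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (AHZ / 2)) / AHZ));
	return x;
}

int main(void)
{
	/* 2.5 s = 2500000000 ns -> exactly 150 ticks at AHZ=60 */
	printf("%llu\n", (unsigned long long)nsec_to_AHZ(2500000000ull));
	return 0;
}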
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 232d8fdb557c..7153aef34d5c 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -40,6 +40,8 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
void *addr = kmap_atomic(page, KM_USER0);
clear_user_page(addr, vaddr, page);
kunmap_atomic(addr, KM_USER0);
+	/* Make sure this page is cleared on other CPUs too before using it */
+ smp_wmb();
}
static inline void clear_highpage(struct page *page)
@@ -73,6 +75,8 @@ static inline void copy_user_highpage(struct page *to, struct page *from, unsign
copy_user_page(vto, vfrom, vaddr, to);
kunmap_atomic(vfrom, KM_USER0);
kunmap_atomic(vto, KM_USER1);
+	/* Make sure this page is copied on other CPUs too before using it */
+ smp_wmb();
}
static inline void copy_highpage(struct page *to, struct page *from)
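[Editor's note: a generic illustration of the ordering problem the two smp_wmb() calls address -- not kernel code; __sync_synchronize() stands in for the barrier. If the store that publishes the page can become visible to other CPUs before the stores that filled it, a reader can consume stale contents:]

static char * volatile shared_page;	/* visible to both CPUs */

void producer(char *page)
{
	for (int i = 0; i < 4096; i++)
		page[i] = 0;		/* clear_user_page() stand-in */
	__sync_synchronize();		/* the added smp_wmb() */
	shared_page = page;		/* publish the page */
}

void consumer(void)
{
	char *p = shared_page;

	/* without the producer's barrier, p[0] may still hold old data
	 * even though the pointer is already visible */
	if (p)
		(void)p[0];
}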
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 90f5cb645116..8810b551082a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -508,7 +508,7 @@ struct task_struct {
struct timer_list real_timer;
unsigned long utime, stime;
unsigned long nvcsw, nivcsw; /* context switch counts */
- u64 start_time;
+ struct timespec start_time;
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
unsigned long min_flt, maj_flt;
/* process credentials */
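[Editor's note: with start_time now a struct timespec rather than a scalar, readers convert explicitly. A stand-in sketch of the conversion callers now perform; timespec_to_ns() is a hypothetical helper, and the nanosecond reading of the old u64 is this patch series' convention:]

#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000ull

static uint64_t timespec_to_ns(const struct timespec *ts)
{
	return (uint64_t)ts->tv_sec * NSEC_PER_SEC + (uint64_t)ts->tv_nsec;
}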
diff --git a/include/linux/times.h b/include/linux/times.h
index ff00f334ffaa..0c5aa078dad4 100644
--- a/include/linux/times.h
+++ b/include/linux/times.h
@@ -55,6 +55,26 @@ static inline u64 jiffies_64_to_clock_t(u64 x)
}
#endif
+static inline u64 nsec_to_clock_t(u64 x)
+{
+#if (NSEC_PER_SEC % USER_HZ) == 0
+	do_div(x, (NSEC_PER_SEC / USER_HZ));
+#elif (USER_HZ % 512) == 0
+	x *= USER_HZ/512;
+	do_div(x, (NSEC_PER_SEC / 512));
+#else
+	/*
+	 * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
+	 * overflow after 64.99 years.
+	 * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
+	 */
+	x *= 9;
+	do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ/2))
+				  / USER_HZ));
+#endif
+	return x;
+}
+
struct tms {
clock_t tms_utime;
clock_t tms_stime;
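[Editor's note: a user-space sketch tying the new helper to the sched.h change above -- do_div() is stubbed, USER_HZ=100 is the common value, and only the exact first branch of nsec_to_clock_t() is exercised; an illustration, not kernel code:]

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define USER_HZ		100
#define NSEC_PER_SEC	1000000000ull
#define do_div(n, base)	((n) = (n) / (base))	/* user-space stand-in */

static uint64_t nsec_to_clock_t(uint64_t x)
{
	/* NSEC_PER_SEC % USER_HZ == 0, so this mirrors the first branch */
	do_div(x, NSEC_PER_SEC / USER_HZ);
	return x;
}

int main(void)
{
	/* a start_time of 5.25 s, as /proc reporting would convert it */
	struct timespec start = { .tv_sec = 5, .tv_nsec = 250000000 };
	uint64_t ns = (uint64_t)start.tv_sec * NSEC_PER_SEC + start.tv_nsec;

	printf("%llu ticks\n", (unsigned long long)nsec_to_clock_t(ns));
	return 0;	/* prints: 525 ticks */
}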