author     Linus Torvalds <torvalds@ppc970.osdl.org>  2004-06-23 21:57:36 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2004-06-23 21:57:36 -0700
commit     a7993279fb7a54aa0beb4159bb1d1ea214372420 (patch)
tree       44085882fd8a28407d768159f3228e1a58a24321 /include
parent     8f77e95efa77e25bd14686f483e2ed25525c37f2 (diff)
Make bitops/cpumask functions be "const" where appropriate.
Diffstat (limited to 'include')
-rw-r--r--  include/asm-ppc64/bitops.h      |  6
-rw-r--r--  include/asm-s390/byteorder.h    |  6
-rw-r--r--  include/linux/byteorder/swab.h  |  6
-rw-r--r--  include/linux/cpumask.h         | 14
4 files changed, 16 insertions(+), 16 deletions(-)
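The point of the change, in plain C terms: helpers that only read through a pointer now say so in their prototypes, so callers holding read-only data can use them without casts or qualifier warnings. A minimal standalone sketch of the pattern (the count_bits helper is hypothetical, not from the kernel; __builtin_popcountl is the GCC builtin):

    #include <stddef.h>

    /* A reader that never writes through its argument takes a const
     * pointer; a read-only array can then be passed with no cast. */
    static unsigned long count_bits(const unsigned long *addr, size_t nwords)
    {
            unsigned long total = 0;
            size_t i;

            for (i = 0; i < nwords; i++)
                    total += (unsigned long)__builtin_popcountl(addr[i]);
            return total;
    }

    int main(void)
    {
            static const unsigned long ro_map[2] = { 0x5UL, 0x1UL };

            /* With a non-const parameter this call would draw a
             * "discards const qualifier" warning. */
            return count_bits(ro_map, 2) == 3 ? 0 : 1;
    }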
diff --git a/include/asm-ppc64/bitops.h b/include/asm-ppc64/bitops.h
index feeeb9b77b29..6d820e7ea772 100644
--- a/include/asm-ppc64/bitops.h
+++ b/include/asm-ppc64/bitops.h
@@ -288,15 +288,15 @@ static __inline__ int ffs(int x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
-extern unsigned long find_next_zero_bit(unsigned long *addr, unsigned long size, unsigned long offset);
+extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
#define find_first_zero_bit(addr, size) \
find_next_zero_bit((addr), (size), 0)
-extern unsigned long find_next_bit(unsigned long *addr, unsigned long size, unsigned long offset);
+extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
#define find_first_bit(addr, size) \
find_next_bit((addr), (size), 0)
-extern unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset);
+extern unsigned long find_next_zero_le_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
#define find_first_zero_le_bit(addr, size) \
find_next_zero_le_bit((addr), (size), 0)
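With the ppc64 prototypes const-qualified, a caller that scans a bitmap it receives read-only no longer has to cast. A minimal sketch (hypothetical first_feature helper, assuming a kernel build of this era where find_first_bit is picked up via linux/bitops.h):

    #include <linux/bitops.h>

    /* Return the index of the first set bit in a read-only feature
     * map, or -1 if none is set. */
    static int first_feature(const unsigned long *feature_map,
                             unsigned long nbits)
    {
            unsigned long bit = find_first_bit(feature_map, nbits);

            return bit < nbits ? (int)bit : -1;
    }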
diff --git a/include/asm-s390/byteorder.h b/include/asm-s390/byteorder.h
index 91a6f3893bcd..2cc35a0e188e 100644
--- a/include/asm-s390/byteorder.h
+++ b/include/asm-s390/byteorder.h
@@ -14,7 +14,7 @@
#ifdef __GNUC__
#ifdef __s390x__
-static __inline__ __u64 ___arch__swab64p(__u64 *x)
+static __inline__ __u64 ___arch__swab64p(const __u64 *x)
{
__u64 result;
@@ -40,7 +40,7 @@ static __inline__ void ___arch__swab64s(__u64 *x)
}
#endif /* __s390x__ */
-static __inline__ __u32 ___arch__swab32p(__u32 *x)
+static __inline__ __u32 ___arch__swab32p(const __u32 *x)
{
__u32 result;
@@ -77,7 +77,7 @@ static __inline__ void ___arch__swab32s(__u32 *x)
*x = ___arch__swab32p(x);
}
-static __inline__ __u16 ___arch__swab16p(__u16 *x)
+static __inline__ __u16 ___arch__swab16p(const __u16 *x)
{
__u16 result;
diff --git a/include/linux/byteorder/swab.h b/include/linux/byteorder/swab.h
index 02ad0b5246e9..2f1cb775125a 100644
--- a/include/linux/byteorder/swab.h
+++ b/include/linux/byteorder/swab.h
@@ -134,7 +134,7 @@ static __inline__ __attribute_const__ __u16 __fswab16(__u16 x)
{
return __arch__swab16(x);
}
-static __inline__ __u16 __swab16p(__u16 *x)
+static __inline__ __u16 __swab16p(const __u16 *x)
{
return __arch__swab16p(x);
}
@@ -147,7 +147,7 @@ static __inline__ __attribute_const__ __u32 __fswab32(__u32 x)
{
return __arch__swab32(x);
}
-static __inline__ __u32 __swab32p(__u32 *x)
+static __inline__ __u32 __swab32p(const __u32 *x)
{
return __arch__swab32p(x);
}
@@ -167,7 +167,7 @@ static __inline__ __attribute_const__ __u64 __fswab64(__u64 x)
return __arch__swab64(x);
# endif
}
-static __inline__ __u64 __swab64p(__u64 *x)
+static __inline__ __u64 __swab64p(const __u64 *x)
{
return __arch__swab64p(x);
}
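The same applies to the generic swab helpers: the *p variants only dereference their argument, so they can now be handed pointers into read-only data. A minimal sketch (hypothetical descriptor_word helper; assuming the usual route of getting these through asm/byteorder.h):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Byte-swap a 32-bit word read out of a read-only, device-provided
     * descriptor; no cast is needed for the const pointer. */
    static __u32 descriptor_word(const __u32 *desc)
    {
            return __swab32p(desc);
    }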
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 1d06e38480f7..ecca23548b64 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -171,19 +171,19 @@ static inline int __cpus_subset(cpumask_t *src1p,
}
#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
-static inline int __cpus_empty(cpumask_t *srcp, int nbits)
+static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
-static inline int __cpus_full(cpumask_t *srcp, int nbits)
+static inline int __cpus_full(const cpumask_t *srcp, int nbits)
{
return bitmap_full(srcp->bits, nbits);
}
#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
-static inline int __cpus_weight(cpumask_t *srcp, int nbits)
+static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
@@ -191,7 +191,7 @@ static inline int __cpus_weight(cpumask_t *srcp, int nbits)
#define cpus_shift_right(dst, src, n) \
__cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_right(cpumask_t *dstp,
- cpumask_t *srcp, int n, int nbits)
+ const cpumask_t *srcp, int n, int nbits)
{
bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}
@@ -199,19 +199,19 @@ static inline void __cpus_shift_right(cpumask_t *dstp,
#define cpus_shift_left(dst, src, n) \
__cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_left(cpumask_t *dstp,
- cpumask_t *srcp, int n, int nbits)
+ const cpumask_t *srcp, int n, int nbits)
{
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
-static inline int __first_cpu(cpumask_t *srcp, int nbits)
+static inline int __first_cpu(const cpumask_t *srcp, int nbits)
{
return find_first_bit(srcp->bits, nbits);
}
#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
-static inline int __next_cpu(int n, cpumask_t *srcp, int nbits)
+static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
{
return find_next_bit(srcp->bits, nbits, n+1);
}