author     Linus Torvalds <torvalds@athlon.transmeta.com>  2002-02-05 00:12:58 -0800
committer  Linus Torvalds <torvalds@athlon.transmeta.com>  2002-02-05 00:12:58 -0800
commit     908920b1d370e7a5c301d14cfce10c310be19be3 (patch)
tree       25606bf2c5215cc0e25c9647612390b88622b279 /include
parent     d01b7e92c0020f89b4bb33fe61c0dffab7078b42 (diff)

v2.5.1.9 -> v2.5.1.10

 - Kai Germaschewski: ISDN updates
 - Al Viro: start moving buffer cache indexing to "struct block_device *"
 - Greg KH: USB update
 - Russell King: fix up some ARM merge issues
 - Ingo Molnar: scalable scheduler
Diffstat (limited to 'include')
-rw-r--r--   include/asm-i386/bitops.h        |   8
-rw-r--r--   include/asm-i386/mmu_context.h   |  23
-rw-r--r--   include/asm-i386/pgalloc.h       |   1
-rw-r--r--   include/asm-i386/smp.h           |  15
-rw-r--r--   include/asm-i386/smplock.h       |  21
-rw-r--r--   include/linux/fs.h               |  36
-rw-r--r--   include/linux/jbd.h              |   7
-rw-r--r--   include/linux/kernel_stat.h      |   3
-rw-r--r--   include/linux/list.h             |   2
-rw-r--r--   include/linux/sched.h            | 164
-rw-r--r--   include/linux/smp.h              |   2
-rw-r--r--   include/linux/usb.h              |  27
-rw-r--r--   include/linux/usbdevice_fs.h     |  10
-rw-r--r--   include/net/bluetooth/hci_usb.h  |   2

14 files changed, 205 insertions(+), 116 deletions(-)
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index a3063cacc919..b3d1e3a3f712 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -75,6 +75,14 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
:"=m" (ADDR)
:"Ir" (nr));
}
+
+static __inline__ void __clear_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__(
+ "btrl %1,%0"
+ :"=m" (ADDR)
+ :"Ir" (nr));
+}
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
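A note on the bitops.h hunk: the new __clear_bit() is the non-atomic counterpart of clear_bit(), presumably identical minus the bus-lock prefix (the prefix sits outside the quoted context above), mirroring the existing __set_bit()/set_bit() pair. It is only safe when no other CPU can touch the word concurrently. A minimal usage sketch — the names below are hypothetical, not from the patch:

    static unsigned long my_flags;                  /* word guarded by my_lock */
    static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;

    static void my_clear_flag(int nr)
    {
            spin_lock(&my_lock);
            __clear_bit(nr, &my_flags);     /* non-atomic: lock already held */
            spin_unlock(&my_lock);
    }

    static void my_clear_flag_unlocked(int nr)
    {
            clear_bit(nr, &my_flags);       /* atomic: usable without the lock */
    }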
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 43cabfcf2aab..2f76058591fb 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -7,6 +7,29 @@
#include <asm/pgalloc.h>
/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 168-bit bitmap where the first 128 bits are
+ * unlikely to be cleared. It's guaranteed that at least one of the
+ * 168 bits is cleared.
+ */
+#if MAX_RT_PRIO != 128 || MAX_PRIO != 168
+# error update this function.
+#endif
+
+static inline int sched_find_first_zero_bit(char *bitmap)
+{
+ unsigned int *b = (unsigned int *)bitmap;
+ unsigned int rt;
+
+ rt = b[0] & b[1] & b[2] & b[3];
+ if (unlikely(rt != 0xffffffff))
+ return find_first_zero_bit(bitmap, MAX_RT_PRIO);
+
+ if (b[4] != ~0)
+ return ffz(b[4]) + MAX_RT_PRIO;
+ return ffz(b[5]) + 32 + MAX_RT_PRIO;
+}
+/*
* possibly do the LDT unload here?
*/
#define destroy_context(mm) do { } while(0)
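A note on sched_find_first_zero_bit(): it services the new O(1) scheduler, which keeps one bit per priority level (MAX_PRIO = 168: 128 internal RT slots plus 40 nice levels). A cleared bit marks a level with runnable tasks, so the first zero bit is the best priority to run; the fast path ANDs the four words covering the 128 RT slots in one step, since RT tasks are rare. A hedged sketch of the intended caller — the prio_array layout is an assumption, not shown in this patch:

    /* Hypothetical lookup: find the best runnable priority level
     * and take the first task queued at that level. */
    static task_t *my_pick_next(prio_array_t *array)
    {
            int idx = sched_find_first_zero_bit(array->bitmap); /* 0..167 */
            list_t *queue = array->queue + idx;                 /* assumed field */

            return list_entry(queue->next, task_t, run_list);
    }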
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
index 988c26a7576b..765e7c185ce6 100644
--- a/include/asm-i386/pgalloc.h
+++ b/include/asm-i386/pgalloc.h
@@ -224,6 +224,7 @@ struct tlb_state
{
struct mm_struct *active_mm;
int state;
+ char __cacheline_padding[24];
};
extern struct tlb_state cpu_tlbstate[NR_CPUS];
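A note on the pgalloc.h padding: struct tlb_state was 8 bytes on i386 (a pointer plus an int); the 24 padding bytes round each cpu_tlbstate[] entry up to 32 bytes, a full i386 L1 cache line, so no two CPUs' entries share a line (avoiding false sharing). The same idiom in isolation, assuming a 32-byte line:

    #define MY_CACHE_LINE 32        /* assumed i386 L1 line size */

    struct my_percpu {
            unsigned long count;    /* 4 bytes on i386 */
            char __pad[MY_CACHE_LINE - sizeof(unsigned long)];
    };

    /* my_stats[i] and my_stats[j] now live on distinct cache lines,
     * so one CPU's updates never invalidate another CPU's entry. */
    static struct my_percpu my_stats[NR_CPUS];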
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index ce81f42c9b08..34820d6008d8 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -63,6 +63,7 @@ extern int cpu_sibling_map[];
extern void smp_flush_tlb(void);
extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
extern void smp_send_reschedule(int cpu);
+extern void smp_send_reschedule_all(void);
extern void smp_invalidate_rcv(void); /* Process an NMI */
extern void (*mtrr_hook) (void);
extern void zap_low_mappings (void);
@@ -104,7 +105,7 @@ extern void smp_store_cpu_info(int id); /* Store per CPU info (like the initial
* so this is correct in the x86 case.
*/
-#define smp_processor_id() (current->processor)
+#define smp_processor_id() (current->cpu)
static __inline int hard_smp_processor_id(void)
{
@@ -122,17 +123,5 @@ static __inline int logical_smp_processor_id(void)
#define NO_PROC_ID 0xFF /* No processor magic marker */
-/*
- * This magic constant controls our willingness to transfer
- * a process across CPUs. Such a transfer incurs misses on the L1
- * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
- * gut feeling is this will vary by board in value. For a board
- * with separate L2 cache it probably depends also on the RSS, and
- * for a board with shared L2 cache it ought to decay fast as other
- * processes are run.
- */
-
-#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */
-
#endif
#endif
diff --git a/include/asm-i386/smplock.h b/include/asm-i386/smplock.h
index 864351c543a4..10cfc1fd0c43 100644
--- a/include/asm-i386/smplock.h
+++ b/include/asm-i386/smplock.h
@@ -15,21 +15,22 @@ extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
-#define release_kernel_lock(task, cpu) \
-do { \
- if (task->lock_depth >= 0) \
- spin_unlock(&kernel_flag); \
- release_irqlock(cpu); \
- __sti(); \
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (unlikely(task->lock_depth >= 0)) { \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+ } \
} while (0)
/*
* Re-acquire the kernel lock
*/
-#define reacquire_kernel_lock(task) \
-do { \
- if (task->lock_depth >= 0) \
- spin_lock(&kernel_flag); \
+#define reacquire_kernel_lock(task) \
+do { \
+ if (unlikely(task->lock_depth >= 0)) \
+ spin_lock(&kernel_flag); \
} while (0)
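A note on the smplock.h hunk: besides adding the unlikely() hint (most tasks do not hold the big kernel lock), release_kernel_lock() now runs release_irqlock() and __sti() only when a lock was actually held, rather than unconditionally. On GCC, unlikely() is a thin wrapper around __builtin_expect(); a sketch of the idiom with hypothetical names:

    /* Roughly what the kernel's hint macros expand to on GCC: */
    #define my_likely(x)    __builtin_expect((x), 1)
    #define my_unlikely(x)  __builtin_expect((x), 0)

    int my_divide(int a, int b)
    {
            if (my_unlikely(b == 0))
                    return 0;       /* rare path, moved off the hot path */
            return a / b;
    }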
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a83af6000e23..b102c2d51d54 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1216,7 +1216,7 @@ extern int sync_buffers(kdev_t, int);
extern void sync_dev(kdev_t);
extern int fsync_dev(kdev_t);
extern int fsync_super(struct super_block *);
-extern int fsync_no_super(kdev_t);
+extern int fsync_no_super(struct block_device *);
extern void sync_inodes_sb(struct super_block *);
extern int osync_inode_buffers(struct inode *);
extern int osync_inode_data_buffers(struct inode *);
@@ -1358,7 +1358,20 @@ extern void remove_inode_hash(struct inode *);
extern struct file * get_empty_filp(void);
extern void file_move(struct file *f, struct list_head *list);
extern struct buffer_head * get_hash_table(kdev_t, sector_t, int);
-extern struct buffer_head * getblk(kdev_t, sector_t, int);
+extern struct buffer_head * __getblk(struct block_device *, sector_t, int);
+static inline struct buffer_head * getblk(kdev_t dev, sector_t block, int size)
+{
+ struct block_device *bdev;
+ struct buffer_head *bh;
+ bdev = bdget(kdev_t_to_nr(dev));
+ if (!bdev) {
+ printk("No block device for %s\n", bdevname(dev));
+ BUG();
+ }
+ bh = __getblk(bdev, block, size);
+ atomic_dec(&bdev->bd_count);
+ return bh;
+}
extern void ll_rw_block(int, int, struct buffer_head * bh[]);
extern int submit_bh(int, struct buffer_head *);
struct bio;
@@ -1379,14 +1392,27 @@ static inline void bforget(struct buffer_head *buf)
extern int set_blocksize(kdev_t, int);
extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);
-extern struct buffer_head * bread(kdev_t, int, int);
+extern struct buffer_head * __bread(struct block_device *, int, int);
+static inline struct buffer_head * bread(kdev_t dev, int block, int size)
+{
+ struct block_device *bdev;
+ struct buffer_head *bh;
+ bdev = bdget(kdev_t_to_nr(dev));
+ if (!bdev) {
+ printk("No block device for %s\n", bdevname(dev));
+ BUG();
+ }
+ bh = __bread(bdev, block, size);
+ atomic_dec(&bdev->bd_count);
+ return bh;
+}
static inline struct buffer_head * sb_bread(struct super_block *sb, int block)
{
- return bread(sb->s_dev, block, sb->s_blocksize);
+ return __bread(sb->s_bdev, block, sb->s_blocksize);
}
static inline struct buffer_head * sb_getblk(struct super_block *sb, int block)
{
- return getblk(sb->s_dev, block, sb->s_blocksize);
+ return __getblk(sb->s_bdev, block, sb->s_blocksize);
}
static inline struct buffer_head * sb_get_hash_table(struct super_block *sb, int block)
{
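A note on the fs.h changes: bread() and getblk() become compatibility wrappers that translate a kdev_t into a struct block_device via bdget() and drop the reference afterwards, while the real work moves into __bread()/__getblk(). Code that already has a superblock can skip the translation, which is exactly what sb_bread()/sb_getblk() now do through sb->s_bdev. A hedged example of filesystem code on the new fast path (the block number is arbitrary):

    static struct buffer_head *my_read_root_block(struct super_block *sb)
    {
            /* Goes straight to __bread() via sb->s_bdev; no bdget()/
             * kdev_t round trip, unlike the legacy bread() wrapper. */
            return sb_bread(sb, 1);
    }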
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 2cb980fbb6d5..cb0ba707e782 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -482,13 +482,13 @@ struct journal_s
/* Device, blocksize and starting block offset for the location
* where we store the journal. */
- kdev_t j_dev;
+ struct block_device * j_dev;
int j_blocksize;
unsigned int j_blk_offset;
/* Device which holds the client fs. For internal journal this
* will be equal to j_dev. */
- kdev_t j_fs_dev;
+ struct block_device * j_fs_dev;
/* Total maximum capacity of the journal region on disk. */
unsigned int j_maxlen;
@@ -649,7 +649,8 @@ extern int journal_flush (journal_t *);
extern void journal_lock_updates (journal_t *);
extern void journal_unlock_updates (journal_t *);
-extern journal_t * journal_init_dev(kdev_t dev, kdev_t fs_dev,
+extern journal_t * journal_init_dev(struct block_device *bdev,
+ struct block_device *fs_dev,
int start, int len, int bsize);
extern journal_t * journal_init_inode (struct inode *);
extern int journal_update_format (journal_t *);
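A note on the jbd.h changes: the journal now records both its own device (j_dev) and the client filesystem's device (j_fs_dev) as struct block_device pointers, so journal_init_dev() callers must pass those instead of kdev_t values. A hedged sketch of an external-journal setup under the new signature — names here are hypothetical:

    static journal_t *my_init_external_journal(struct block_device *jdev,
                                               struct super_block *sb,
                                               int start, int len)
    {
            /* j_dev = the journal's own device, j_fs_dev = client fs. */
            return journal_init_dev(jdev, sb->s_bdev, start, len,
                                    sb->s_blocksize);
    }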
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index d685da827ef2..a75048fbcc4e 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -32,11 +32,12 @@ struct kernel_stat {
unsigned int ipackets, opackets;
unsigned int ierrors, oerrors;
unsigned int collisions;
- unsigned int context_swtch;
};
extern struct kernel_stat kstat;
+extern unsigned long nr_context_switches(void);
+
#if !defined(CONFIG_ARCH_S390)
/*
* Number of interrupts per specific IRQ source, since bootup
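A note on the kernel_stat.h change: a single global context_swtch counter would be a shared hot spot under the new per-CPU runqueues, so the total becomes a function. The patch does not show the implementation; presumably it sums a per-runqueue counter, along these lines (cpu_rq() and nr_switches are assumptions, not from this patch):

    unsigned long nr_context_switches(void)
    {
            unsigned long i, sum = 0;

            /* Sum per-CPU counters; no globally bounced cacheline. */
            for (i = 0; i < smp_num_cpus; i++)
                    sum += cpu_rq(i)->nr_switches;  /* hypothetical */
            return sum;
    }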
diff --git a/include/linux/list.h b/include/linux/list.h
index 0d04422e81d9..8c03f2f35082 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -19,6 +19,8 @@ struct list_head {
struct list_head *next, *prev;
};
+typedef struct list_head list_t;
+
#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) \
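A note on the list.h change: list_t is plain shorthand for struct list_head, and every existing list macro keeps working. For instance:

    static LIST_HEAD(my_queue);             /* initializer unchanged */

    struct my_item {
            list_t link;                    /* new spelling */
            int value;
    };

    static void my_enqueue(struct my_item *item)
    {
            list_add(&item->link, &my_queue);
    }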
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 905f2673680d..5751c5b6d0ac 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -72,8 +72,9 @@ extern unsigned long avenrun[]; /* Load averages */
#define CT_TO_SECS(x) ((x) / HZ)
#define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ)
-extern int nr_running, nr_threads;
+extern int nr_threads;
extern int last_pid;
+extern unsigned long nr_running(void);
#include <linux/fs.h>
#include <linux/time.h>
@@ -116,12 +117,6 @@ extern int last_pid;
#define SCHED_FIFO 1
#define SCHED_RR 2
-/*
- * This is an additional bit set when we want to
- * yield the CPU for one re-schedule..
- */
-#define SCHED_YIELD 0x10
-
struct sched_param {
int sched_priority;
};
@@ -139,7 +134,6 @@ struct completion;
* a separate lock).
*/
extern rwlock_t tasklist_lock;
-extern spinlock_t runqueue_lock;
extern spinlock_t mmlist_lock;
extern void sched_init(void);
@@ -151,6 +145,7 @@ extern void update_process_times(int user);
extern void update_one_process(struct task_struct *p, unsigned long user,
unsigned long system, int cpu);
extern void expire_task(struct task_struct *p);
+extern void idle_tick(void);
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
@@ -280,6 +275,9 @@ struct user_struct {
extern struct user_struct root_user;
#define INIT_USER (&root_user)
+typedef struct task_struct task_t;
+typedef struct prio_array prio_array_t;
+
struct task_struct {
/*
* offsets of these are hardcoded elsewhere - touch with care
@@ -297,37 +295,30 @@ struct task_struct {
int lock_depth; /* Lock depth */
-/*
- * offset 32 begins here on 32-bit platforms. We keep
- * all fields in a single cacheline that are needed for
- * the goodness() loop in schedule().
- */
- unsigned long dyn_prio;
- long nice;
- unsigned long policy;
- struct mm_struct *mm;
- int processor;
/*
- * cpus_runnable is ~0 if the process is not running on any
- * CPU. It's (1 << cpu) if it's running on a CPU. This mask
- * is updated under the runqueue lock.
- *
- * To determine whether a process might run on a CPU, this
- * mask is AND-ed with cpus_allowed.
+ * offset 32 begins here on 32-bit platforms.
*/
- unsigned long cpus_runnable, cpus_allowed;
- /*
- * (only the 'next' pointer fits into the cacheline, but
- * that's just fine.)
- */
- struct list_head run_list;
- long time_slice;
- /* recalculation loop checkpoint */
- unsigned long rcl_last;
+ unsigned int cpu;
+ int prio;
+ long __nice;
+ list_t run_list;
+ prio_array_t *array;
+
+ unsigned int time_slice;
+ unsigned long sleep_timestamp, run_timestamp;
+
+ #define SLEEP_HIST_SIZE 4
+ int sleep_hist[SLEEP_HIST_SIZE];
+ int sleep_idx;
+
+ unsigned long policy;
+ unsigned long cpus_allowed;
struct task_struct *next_task, *prev_task;
- struct mm_struct *active_mm;
+
+ struct mm_struct *mm, *active_mm;
struct list_head local_pages;
+
unsigned int allocation_order, nr_local_pages;
/* task state */
@@ -452,11 +443,66 @@ struct task_struct {
*/
#define _STK_LIM (8*1024*1024)
-#define MAX_DYNPRIO 40
-#define DEF_TSLICE (6 * HZ / 100)
-#define MAX_TSLICE (20 * HZ / 100)
-#define DEF_NICE (0)
+/*
+ * RT priorities go from 0 to 99, but internally we max
+ * them out at 128 to make it easier to search the
+ * scheduler bitmap.
+ */
+#define MAX_RT_PRIO 128
+/*
+ * The lower the priority of a process, the more likely it is
+ * to run. Priority of a process goes from 0 to 167. The 0-99
+ * priority range is allocated to RT tasks, the 128-167 range
+ * is for SCHED_OTHER tasks.
+ */
+#define MAX_PRIO (MAX_RT_PRIO+40)
+#define DEF_USER_NICE 0
+
+/*
+ * Scales user-nice values [ -20 ... 0 ... 19 ]
+ * to static priority [ 128 ... 167 (MAX_PRIO-1) ]
+ *
+ * User-nice value of -20 == static priority 128, and
+ * user-nice value 19 == static priority 167. The lower
+ * the priority value, the higher the task's priority.
+ *
+ * Note that while static priority cannot go below 128,
+ * the priority of a process can go as low as 0.
+ */
+#define NICE_TO_PRIO(n) (MAX_PRIO-1 + (n) - 19)
+
+#define DEF_PRIO NICE_TO_PRIO(DEF_USER_NICE)
+/*
+ * Default timeslice is 90 msecs, maximum is 150 msecs.
+ * Minimum timeslice is 20 msecs.
+ */
+#define MIN_TIMESLICE ( 20 * HZ / 1000)
+#define MAX_TIMESLICE (150 * HZ / 1000)
+
+#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
+#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
+
+/*
+ * PRIO_TO_TIMESLICE scales priority values [ 128 ... 167 ]
+ * to initial time slice values [ MAX_TIMESLICE (150 msec)
+ * ... MIN_TIMESLICE (20 msec) ]
+ *
+ * The higher a process's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority process gets MIN_TIMESLICE worth of execution time.
+ */
+#define PRIO_TO_TIMESLICE(p) \
+ ((( (MAX_USER_PRIO-1-USER_PRIO(p))*(MAX_TIMESLICE-MIN_TIMESLICE) + \
+ MAX_USER_PRIO-1) / MAX_USER_PRIO) + MIN_TIMESLICE)
+
+#define RT_PRIO_TO_TIMESLICE(p) \
+ ((( (MAX_RT_PRIO-(p)-1)*(MAX_TIMESLICE-MIN_TIMESLICE) + \
+ MAX_RT_PRIO-1) / MAX_RT_PRIO) + MIN_TIMESLICE)
+
+extern void set_cpus_allowed(task_t *p, unsigned long new_mask);
+extern void set_user_nice(task_t *p, long nice);
+asmlinkage long sys_sched_yield(void);
+#define yield() sys_sched_yield()
/*
* The default (Linux) execution domain.
@@ -475,16 +521,13 @@ extern struct exec_domain default_exec_domain;
addr_limit: KERNEL_DS, \
exec_domain: &default_exec_domain, \
lock_depth: -1, \
- dyn_prio: 0, \
- nice: DEF_NICE, \
+ __nice: DEF_USER_NICE, \
policy: SCHED_OTHER, \
+ cpus_allowed: -1, \
mm: NULL, \
active_mm: &init_mm, \
- cpus_runnable: -1, \
- cpus_allowed: -1, \
- run_list: { NULL, NULL }, \
- rcl_last: 0, \
- time_slice: DEF_TSLICE, \
+ run_list: LIST_HEAD_INIT(tsk.run_list), \
+ time_slice: PRIO_TO_TIMESLICE(DEF_PRIO), \
next_task: &tsk, \
prev_task: &tsk, \
p_opptr: &tsk, \
@@ -560,19 +603,6 @@ static inline struct task_struct *find_task_by_pid(int pid)
return p;
}
-#define task_has_cpu(tsk) ((tsk)->cpus_runnable != ~0UL)
-
-static inline void task_set_cpu(struct task_struct *tsk, unsigned int cpu)
-{
- tsk->processor = cpu;
- tsk->cpus_runnable = 1UL << cpu;
-}
-
-static inline void task_release_cpu(struct task_struct *tsk)
-{
- tsk->cpus_runnable = ~0UL;
-}
-
/* per-UID process charging. */
extern struct user_struct * alloc_uid(uid_t);
extern void free_uid(struct user_struct *);
@@ -600,6 +630,7 @@ extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
signed long timeout));
extern int FASTCALL(wake_up_process(struct task_struct * tsk));
+extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk));
#define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
#define wake_up_nr(x, nr) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
@@ -794,6 +825,7 @@ extern void exit_sighand(struct task_struct *);
extern void reparent_to_init(void);
extern void daemonize(void);
+extern task_t *child_reaper;
extern int do_execve(char *, char **, char **, struct pt_regs *);
extern int do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long);
@@ -802,6 +834,9 @@ extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
+extern void wait_task_inactive(task_t * p);
+extern void kick_if_running(task_t * p);
+
#define __wait_event(wq, condition) \
do { \
wait_queue_t __wait; \
@@ -882,21 +917,8 @@ do { \
#define next_thread(p) \
list_entry((p)->thread_group.next, struct task_struct, thread_group)
-static inline void del_from_runqueue(struct task_struct * p)
-{
- nr_running--;
- list_del(&p->run_list);
- p->run_list.next = NULL;
-}
-
-static inline int task_on_runqueue(struct task_struct *p)
-{
- return (p->run_list.next != NULL);
-}
-
static inline void unhash_process(struct task_struct *p)
{
- if (task_on_runqueue(p)) BUG();
write_lock_irq(&tasklist_lock);
nr_threads--;
unhash_pid(p);
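A note on the sched.h numbers: with HZ = 100 (one tick = 10 msec), the macros above reproduce the timeslice figures quoted in the comments. Worked out step by step:

    /* MIN_TIMESLICE = 20*100/1000  =  2 ticks  (20 msec)
     * MAX_TIMESLICE = 150*100/1000 = 15 ticks (150 msec)
     *
     * DEF_PRIO = NICE_TO_PRIO(0) = 167 + 0 - 19 = 148
     * USER_PRIO(148) = 148 - 128 = 20, MAX_USER_PRIO = 40
     *
     * PRIO_TO_TIMESLICE(148)
     *    = ((40 - 1 - 20) * (15 - 2) + 39) / 40 + 2
     *    = (19 * 13 + 39) / 40 + 2
     *    = 286 / 40 + 2
     *    = 7 + 2 = 9 ticks = 90 msec
     *
     * ...matching the "Default timeslice is 90 msecs" comment, and
     * INIT_TASK's time_slice = PRIO_TO_TIMESLICE(DEF_PRIO) above. */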
diff --git a/include/linux/smp.h b/include/linux/smp.h
index cf20a8bd6ed8..bb1ff5c5ea1a 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -86,6 +86,8 @@ extern volatile int smp_msg_id;
#define cpu_number_map(cpu) 0
#define smp_call_function(func,info,retry,wait) ({ 0; })
#define cpu_online_map 1
+static inline void smp_send_reschedule(int cpu) { }
+static inline void smp_send_reschedule_all(void) { }
#endif
#endif
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 6194ad03b5ff..d53844b91db3 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -117,13 +117,26 @@ static __inline__ void wait_ms(unsigned int ms)
mdelay(ms);
}
-typedef struct {
- __u8 requesttype;
- __u8 request;
- __u16 value;
- __u16 index;
- __u16 length;
-} devrequest __attribute__ ((packed));
+/**
+ * struct usb_ctrlrequest - structure used to make USB device control requests easier to create and decode
+ * @bRequestType: matches the USB bmRequestType field
+ * @bRequest: matches the USB bRequest field
+ * @wValue: matches the USB wValue field
+ * @wIndex: matches the USB wIndex field
+ * @wLength: matches the USB wLength field
+ *
+ * This structure is used to send control requests to a USB device. It matches
+ * the different fields of the USB 2.0 Spec section 9.3, table 9-2. See the
+ * USB spec for a fuller description of the different fields, and what they are
+ * used for.
+ */
+struct usb_ctrlrequest {
+ __u8 bRequestType;
+ __u8 bRequest;
+ __u16 wValue;
+ __u16 wIndex;
+ __u16 wLength;
+} __attribute__ ((packed));
/*
* USB device number allocation bitmap. There's one bitmap
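A note on the usb.h change: the anonymous devrequest typedef becomes struct usb_ctrlrequest, with field names taken verbatim from table 9-2 of the USB 2.0 spec so driver code can be checked against the spec directly. Filling one for a standard GET_DESCRIPTOR request might look like the sketch below; the multi-byte fields are little-endian on the wire, hence cpu_to_le16():

    struct usb_ctrlrequest req;

    req.bRequestType = USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
    req.bRequest     = USB_REQ_GET_DESCRIPTOR;
    req.wValue       = cpu_to_le16(USB_DT_DEVICE << 8);  /* type in high byte */
    req.wIndex       = cpu_to_le16(0);
    req.wLength      = cpu_to_le16(18);     /* device descriptor length */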
diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h
index 69008e23610c..0bf98f1d598b 100644
--- a/include/linux/usbdevice_fs.h
+++ b/include/linux/usbdevice_fs.h
@@ -40,11 +40,11 @@
/* usbdevfs ioctl codes */
struct usbdevfs_ctrltransfer {
- __u8 requesttype;
- __u8 request;
- __u16 value;
- __u16 index;
- __u16 length;
+ __u8 bRequestType;
+ __u8 bRequest;
+ __u16 wValue;
+ __u16 wIndex;
+ __u16 wLength;
__u32 timeout; /* in milliseconds */
void *data;
};
diff --git a/include/net/bluetooth/hci_usb.h b/include/net/bluetooth/hci_usb.h
index 348b4d49ea7d..e257b8eb06aa 100644
--- a/include/net/bluetooth/hci_usb.h
+++ b/include/net/bluetooth/hci_usb.h
@@ -38,7 +38,7 @@
struct hci_usb {
struct usb_device *udev;
- devrequest dev_req;
+ struct usb_ctrlrequest dev_req;
struct urb *ctrl_urb;
struct urb *intr_urb;
struct urb *read_urb;