From 8555255f0b426858d8648c6206b70eb906cf4ec7 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sun, 18 Jun 2006 12:14:01 +0100 Subject: Add generic Kbuild files for 'make headers_install' This adds the Kbuild files listing the files which are to be installed by the 'headers_install' make target, in generic directories. Signed-off-by: David Woodhouse --- include/linux/Kbuild | 63 +++++++++++++++++++++++++++++++++++ include/linux/byteorder/Kbuild | 2 ++ include/linux/dvb/Kbuild | 2 ++ include/linux/hdlc/Kbuild | 1 + include/linux/isdn/Kbuild | 1 + include/linux/netfilter/Kbuild | 11 ++++++ include/linux/netfilter_arp/Kbuild | 2 ++ include/linux/netfilter_bridge/Kbuild | 4 +++ include/linux/netfilter_ipv4/Kbuild | 21 ++++++++++++ include/linux/netfilter_ipv6/Kbuild | 6 ++++ include/linux/nfsd/Kbuild | 2 ++ include/linux/raid/Kbuild | 1 + include/linux/sunrpc/Kbuild | 1 + include/linux/tc_act/Kbuild | 1 + include/linux/tc_ematch/Kbuild | 1 + 15 files changed, 119 insertions(+) create mode 100644 include/linux/Kbuild create mode 100644 include/linux/byteorder/Kbuild create mode 100644 include/linux/dvb/Kbuild create mode 100644 include/linux/hdlc/Kbuild create mode 100644 include/linux/isdn/Kbuild create mode 100644 include/linux/netfilter/Kbuild create mode 100644 include/linux/netfilter_arp/Kbuild create mode 100644 include/linux/netfilter_bridge/Kbuild create mode 100644 include/linux/netfilter_ipv4/Kbuild create mode 100644 include/linux/netfilter_ipv6/Kbuild create mode 100644 include/linux/nfsd/Kbuild create mode 100644 include/linux/raid/Kbuild create mode 100644 include/linux/sunrpc/Kbuild create mode 100644 include/linux/tc_act/Kbuild create mode 100644 include/linux/tc_ematch/Kbuild (limited to 'include/linux') diff --git a/include/linux/Kbuild b/include/linux/Kbuild new file mode 100644 index 000000000000..5c95aa4dc0b4 --- /dev/null +++ b/include/linux/Kbuild @@ -0,0 +1,63 @@ +header-y := byteorder/ dvb/ hdlc/ isdn/ nfsd/ raid/ sunrpc/ tc_act/ \ + netfilter/ netfilter_arp/ netfilter_bridge/ netfilter_ipv4/ \ + netfilter_ipv6/ + +header-y += affs_fs.h affs_hardblocks.h aio_abi.h a.out.h arcfb.h \ + atmapi.h atmbr2684.h atmclip.h atm_eni.h atm_he.h \ + atm_idt77105.h atmioc.h atmlec.h atmmpc.h atm_nicstar.h \ + atmppp.h atmsap.h atmsvc.h atm_zatm.h auto_fs4.h auxvec.h \ + awe_voice.h ax25.h b1lli.h baycom.h bfs_fs.h blkpg.h \ + bpqether.h cdk.h chio.h coda_psdev.h coff.h comstats.h \ + consolemap.h cycx_cfm.h devfs_fs.h dm-ioctl.h dn.h dqblk_v1.h \ + dqblk_v2.h dqblk_xfs.h efs_fs_sb.h elf-fdpic.h elf.h elf-em.h \ + fadvise.h fd.h fdreg.h ftape-header-segment.h ftape-vendors.h \ + fuse.h futex.h genetlink.h gen_stats.h gigaset_dev.h hdsmart.h \ + hpfs_fs.h hysdn_if.h i2c-dev.h i2c-id.h i8k.h icmp.h \ + if_arcnet.h if_arp.h if_bonding.h if_cablemodem.h if_fc.h \ + if_fddi.h if.h if_hippi.h if_infiniband.h if_packet.h \ + if_plip.h if_ppp.h if_slip.h if_strip.h if_tunnel.h in6.h \ + in_route.h ioctl.h ip.h ipmi_msgdefs.h ip_mp_alg.h ipsec.h \ + ipx.h irda.h isdn_divertif.h iso_fs.h ite_gpio.h ixjuser.h \ + jffs2.h keyctl.h limits.h major.h matroxfb.h meye.h minix_fs.h \ + mmtimer.h mqueue.h mtio.h ncp_no.h netfilter_arp.h netrom.h \ + nfs2.h nfs4_mount.h nfs_mount.h openprom_fs.h param.h \ + pci_ids.h pci_regs.h personality.h pfkeyv2.h pg.h pkt_cls.h \ + pkt_sched.h posix_types.h ppdev.h prctl.h ps2esdi.h qic117.h \ + qnxtypes.h quotaio_v1.h quotaio_v2.h radeonfb.h raw.h \ + resource.h rose.h sctp.h smbno.h snmp.h sockios.h som.h \ + sound.h stddef.h synclink.h telephony.h termios.h 
ticable.h \ + times.h tiocl.h tipc.h toshiba.h ultrasound.h un.h utime.h \ + utsname.h video_decoder.h video_encoder.h videotext.h vt.h \ + wavefront.h wireless.h xattr.h x25.h zorro_ids.h + +unifdef-y += acct.h adb.h adfs_fs.h agpgart.h apm_bios.h atalk.h \ + atmarp.h atmdev.h atm.h atm_tcp.h audit.h auto_fs.h binfmts.h \ + capability.h capi.h cciss_ioctl.h cdrom.h cm4000_cs.h \ + cn_proc.h coda.h connector.h cramfs_fs.h cuda.h cyclades.h \ + dccp.h dirent.h divert.h elfcore.h errno.h errqueue.h \ + ethtool.h eventpoll.h ext2_fs.h ext3_fs.h fb.h fcntl.h \ + filter.h flat.h fs.h ftape.h gameport.h generic_serial.h \ + genhd.h hayesesp.h hdlcdrv.h hdlc.h hdreg.h hiddev.h hpet.h \ + i2c-algo-ite.h i2c.h i2o-dev.h icmpv6.h if_bridge.h if_ec.h \ + if_eql.h if_ether.h if_frad.h if_ltalk.h if_pppox.h \ + if_shaper.h if_tr.h if_tun.h if_vlan.h if_wanpipe.h igmp.h \ + inet_diag.h in.h inotify.h input.h ipc.h ipmi.h ipv6.h \ + ipv6_route.h isdn.h isdnif.h isdn_ppp.h isicom.h jbd.h \ + joystick.h kdev_t.h kd.h kernelcapi.h kernel.h keyboard.h \ + llc.h loop.h lp.h mempolicy.h mii.h mman.h mroute.h msdos_fs.h \ + msg.h nbd.h ncp_fs.h ncp.h ncp_mount.h netdevice.h \ + netfilter_bridge.h netfilter_decnet.h netfilter.h \ + netfilter_ipv4.h netfilter_ipv6.h netfilter_logging.h net.h \ + netlink.h nfs3.h nfs4.h nfsacl.h nfs_fs.h nfs.h nfs_idmap.h \ + n_r3964.h nubus.h nvram.h parport.h patchkey.h pci.h pktcdvd.h \ + pmu.h poll.h ppp_defs.h ppp-comp.h ptrace.h qnx4_fs.h quota.h \ + random.h reboot.h reiserfs_fs.h reiserfs_xattr.h romfs_fs.h \ + route.h rtc.h rtnetlink.h scc.h sched.h sdla.h \ + selinux_netlink.h sem.h serial_core.h serial.h serio.h shm.h \ + signal.h smb_fs.h smb.h smb_mount.h socket.h sonet.h sonypi.h \ + soundcard.h stat.h sysctl.h tcp.h time.h timex.h tty.h types.h \ + udf_fs_i.h udp.h uinput.h uio.h unistd.h usb_ch9.h \ + usbdevice_fs.h user.h videodev2.h videodev.h wait.h \ + wanrouter.h watchdog.h xfrm.h zftape.h + +objhdr-y := version.h diff --git a/include/linux/byteorder/Kbuild b/include/linux/byteorder/Kbuild new file mode 100644 index 000000000000..84a57d4fb212 --- /dev/null +++ b/include/linux/byteorder/Kbuild @@ -0,0 +1,2 @@ +unifdef-y += generic.h swabb.h swab.h +header-y += big_endian.h little_endian.h pdp_endian.h diff --git a/include/linux/dvb/Kbuild b/include/linux/dvb/Kbuild new file mode 100644 index 000000000000..63973af72fd5 --- /dev/null +++ b/include/linux/dvb/Kbuild @@ -0,0 +1,2 @@ +header-y += ca.h frontend.h net.h osd.h version.h +unifdef-y := audio.h dmx.h video.h diff --git a/include/linux/hdlc/Kbuild b/include/linux/hdlc/Kbuild new file mode 100644 index 000000000000..1fb26448faa9 --- /dev/null +++ b/include/linux/hdlc/Kbuild @@ -0,0 +1 @@ +header-y += ioctl.h diff --git a/include/linux/isdn/Kbuild b/include/linux/isdn/Kbuild new file mode 100644 index 000000000000..c1727c893004 --- /dev/null +++ b/include/linux/isdn/Kbuild @@ -0,0 +1 @@ +header-y += capicmd.h tpam.h diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild new file mode 100644 index 000000000000..d06311acd448 --- /dev/null +++ b/include/linux/netfilter/Kbuild @@ -0,0 +1,11 @@ +header-y := nf_conntrack_sctp.h nf_conntrack_tuple_common.h \ + nfnetlink_conntrack.h nfnetlink_log.h nfnetlink_queue.h \ + xt_CLASSIFY.h xt_comment.h xt_connbytes.h xt_connmark.h \ + xt_CONNMARK.h xt_conntrack.h xt_dccp.h xt_esp.h \ + xt_helper.h xt_length.h xt_limit.h xt_mac.h xt_mark.h \ + xt_MARK.h xt_multiport.h xt_NFQUEUE.h xt_pkttype.h \ + xt_policy.h xt_realm.h xt_sctp.h xt_state.h xt_string.h 
\ + xt_tcpmss.h xt_tcpudp.h + +unifdef-y := nf_conntrack_common.h nf_conntrack_ftp.h \ + nf_conntrack_tcp.h nfnetlink.h x_tables.h xt_physdev.h diff --git a/include/linux/netfilter_arp/Kbuild b/include/linux/netfilter_arp/Kbuild new file mode 100644 index 000000000000..198ec5e7b17d --- /dev/null +++ b/include/linux/netfilter_arp/Kbuild @@ -0,0 +1,2 @@ +header-y := arpt_mangle.h +unifdef-y := arp_tables.h diff --git a/include/linux/netfilter_bridge/Kbuild b/include/linux/netfilter_bridge/Kbuild new file mode 100644 index 000000000000..5b1aba6abbad --- /dev/null +++ b/include/linux/netfilter_bridge/Kbuild @@ -0,0 +1,4 @@ +header-y += ebt_among.h ebt_arp.h ebt_arpreply.h ebt_ip.h ebt_limit.h \ + ebt_log.h ebt_mark_m.h ebt_mark_t.h ebt_nat.h ebt_pkttype.h \ + ebt_redirect.h ebt_stp.h ebt_ulog.h ebt_vlan.h +unifdef-y := ebtables.h ebt_802_3.h diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild new file mode 100644 index 000000000000..04e4d2721689 --- /dev/null +++ b/include/linux/netfilter_ipv4/Kbuild @@ -0,0 +1,21 @@ + +header-y := ip_conntrack_helper.h ip_conntrack_helper_h323_asn1.h \ + ip_conntrack_helper_h323_types.h ip_conntrack_protocol.h \ + ip_conntrack_sctp.h ip_conntrack_tcp.h ip_conntrack_tftp.h \ + ip_nat_pptp.h ipt_addrtype.h ipt_ah.h \ + ipt_CLASSIFY.h ipt_CLUSTERIP.h ipt_comment.h \ + ipt_connbytes.h ipt_connmark.h ipt_CONNMARK.h \ + ipt_conntrack.h ipt_dccp.h ipt_dscp.h ipt_DSCP.h ipt_ecn.h \ + ipt_ECN.h ipt_esp.h ipt_hashlimit.h ipt_helper.h \ + ipt_iprange.h ipt_length.h ipt_limit.h ipt_LOG.h ipt_mac.h \ + ipt_mark.h ipt_MARK.h ipt_multiport.h ipt_NFQUEUE.h \ + ipt_owner.h ipt_physdev.h ipt_pkttype.h ipt_policy.h \ + ipt_realm.h ipt_recent.h ipt_REJECT.h ipt_SAME.h \ + ipt_sctp.h ipt_state.h ipt_string.h ipt_tcpmss.h \ + ipt_TCPMSS.h ipt_tos.h ipt_TOS.h ipt_ttl.h ipt_TTL.h \ + ipt_ULOG.h + +unifdef-y := ip_conntrack.h ip_conntrack_h323.h ip_conntrack_irc.h \ + ip_conntrack_pptp.h ip_conntrack_proto_gre.h \ + ip_conntrack_tuple.h ip_nat.h ip_nat_rule.h ip_queue.h \ + ip_tables.h diff --git a/include/linux/netfilter_ipv6/Kbuild b/include/linux/netfilter_ipv6/Kbuild new file mode 100644 index 000000000000..913ddbf55b4b --- /dev/null +++ b/include/linux/netfilter_ipv6/Kbuild @@ -0,0 +1,6 @@ +header-y += ip6t_HL.h ip6t_LOG.h ip6t_MARK.h ip6t_REJECT.h ip6t_ah.h \ + ip6t_esp.h ip6t_frag.h ip6t_hl.h ip6t_ipv6header.h \ + ip6t_length.h ip6t_limit.h ip6t_mac.h ip6t_mark.h \ + ip6t_multiport.h ip6t_opts.h ip6t_owner.h ip6t_policy.h \ + ip6t_physdev.h ip6t_rt.h +unifdef-y := ip6_tables.h diff --git a/include/linux/nfsd/Kbuild b/include/linux/nfsd/Kbuild new file mode 100644 index 000000000000..c8c545665885 --- /dev/null +++ b/include/linux/nfsd/Kbuild @@ -0,0 +1,2 @@ +unifdef-y := const.h export.h stats.h syscall.h nfsfh.h debug.h auth.h + diff --git a/include/linux/raid/Kbuild b/include/linux/raid/Kbuild new file mode 100644 index 000000000000..73fa27a8d552 --- /dev/null +++ b/include/linux/raid/Kbuild @@ -0,0 +1 @@ +header-y += md_p.h md_u.h diff --git a/include/linux/sunrpc/Kbuild b/include/linux/sunrpc/Kbuild new file mode 100644 index 000000000000..0d1d768a27bf --- /dev/null +++ b/include/linux/sunrpc/Kbuild @@ -0,0 +1 @@ +unifdef-y := debug.h diff --git a/include/linux/tc_act/Kbuild b/include/linux/tc_act/Kbuild new file mode 100644 index 000000000000..5251a505b2f1 --- /dev/null +++ b/include/linux/tc_act/Kbuild @@ -0,0 +1 @@ +header-y += tc_gact.h tc_ipt.h tc_mirred.h tc_pedit.h diff --git a/include/linux/tc_ematch/Kbuild 
b/include/linux/tc_ematch/Kbuild new file mode 100644 index 000000000000..381e93018df6 --- /dev/null +++ b/include/linux/tc_ematch/Kbuild @@ -0,0 +1 @@ +headers-y := tc_em_cmp.h tc_em_meta.h tc_em_nbyte.h tc_em_text.h -- cgit v1.2.3 From 1bca9f2e5bd7f92fe4b34753e57bf4f47d3a2f01 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Thu, 22 Jun 2006 14:24:31 +0100 Subject: Remove and from userspace export Do not export the following i2c header files to user space: * i2c-id.h: These IDs are internal to the kernel and user space does not need them. * i2c-algo-ite.h: The driver is broken and planned for removal, so let's not export it for now. The extra ioctl defined here can be standardized and moved to i2c-core/i2c-dev if really needed. Signed-off-by: Jean Delvare Signed-off-by: David Woodhouse --- include/linux/Kbuild | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 5c95aa4dc0b4..f7252be57702 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -12,7 +12,7 @@ header-y += affs_fs.h affs_hardblocks.h aio_abi.h a.out.h arcfb.h \ dqblk_v2.h dqblk_xfs.h efs_fs_sb.h elf-fdpic.h elf.h elf-em.h \ fadvise.h fd.h fdreg.h ftape-header-segment.h ftape-vendors.h \ fuse.h futex.h genetlink.h gen_stats.h gigaset_dev.h hdsmart.h \ - hpfs_fs.h hysdn_if.h i2c-dev.h i2c-id.h i8k.h icmp.h \ + hpfs_fs.h hysdn_if.h i2c-dev.h i8k.h icmp.h \ if_arcnet.h if_arp.h if_bonding.h if_cablemodem.h if_fc.h \ if_fddi.h if.h if_hippi.h if_infiniband.h if_packet.h \ if_plip.h if_ppp.h if_slip.h if_strip.h if_tunnel.h in6.h \ @@ -38,7 +38,7 @@ unifdef-y += acct.h adb.h adfs_fs.h agpgart.h apm_bios.h atalk.h \ ethtool.h eventpoll.h ext2_fs.h ext3_fs.h fb.h fcntl.h \ filter.h flat.h fs.h ftape.h gameport.h generic_serial.h \ genhd.h hayesesp.h hdlcdrv.h hdlc.h hdreg.h hiddev.h hpet.h \ - i2c-algo-ite.h i2c.h i2o-dev.h icmpv6.h if_bridge.h if_ec.h \ + i2c.h i2o-dev.h icmpv6.h if_bridge.h if_ec.h \ if_eql.h if_ether.h if_frad.h if_ltalk.h if_pppox.h \ if_shaper.h if_tr.h if_tun.h if_vlan.h if_wanpipe.h igmp.h \ inet_diag.h in.h inotify.h input.h ipc.h ipmi.h ipv6.h \ -- cgit v1.2.3 From 844d3b427ef1a4f96e54866747bdb6c6cbca4c6a Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 28 Jun 2006 21:48:27 -0700 Subject: MTD: fix all kernel-doc warnings Fix all kernel-doc warnings in MTD headers and source files: - add some missing struct fields; - correct some function parameter names; - use kernel-doc format for function doc. 
headers; - nand_ecc.c contains only exported interfaces, no internal ones; Signed-off-by: Randy Dunlap Signed-off-by: David Woodhouse --- Documentation/DocBook/mtdnand.tmpl | 4 +++- drivers/mtd/nand/nand_base.c | 16 ++++++++-------- drivers/mtd/nand/nand_ecc.c | 3 +-- include/linux/mtd/nand.h | 13 ++++++++++--- 4 files changed, 22 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl index 6e463d0db266..32f385605981 100644 --- a/Documentation/DocBook/mtdnand.tmpl +++ b/Documentation/DocBook/mtdnand.tmpl @@ -1295,7 +1295,9 @@ in this page !Idrivers/mtd/nand/nand_base.c !Idrivers/mtd/nand/nand_bbt.c -!Idrivers/mtd/nand/nand_ecc.c + diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 80a76654d963..62b861304e03 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -155,7 +155,7 @@ static u16 nand_read_word(struct mtd_info *mtd) /** * nand_select_chip - [DEFAULT] control CE line * @mtd: MTD device structure - * @chip: chipnumber to select, -1 for deselect + * @chipnr: chipnumber to select, -1 for deselect * * Default select function for 1 chip devices. */ @@ -542,7 +542,6 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, * Send command to NAND device. This is the version for the new large page * devices We dont have the separate regions as we have in the small page * devices. We must emulate NAND_CMD_READOOB to keep the code compatible. - * */ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, int column, int page_addr) @@ -656,7 +655,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, /** * nand_get_device - [GENERIC] Get chip for selected access - * @this: the nand chip descriptor + * @chip: the nand chip descriptor * @mtd: MTD device structure * @new_state: the state which is requested * @@ -696,13 +695,12 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state) /** * nand_wait - [DEFAULT] wait until the command is done * @mtd: MTD device structure - * @this: NAND chip structure + * @chip: NAND chip structure * * Wait for command done. This applies to erase and program only * Erase can take up to 400ms and program up to 20ms according to * general NAND and SmartMedia specs - * -*/ + */ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) { @@ -896,6 +894,7 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip, /** * nand_transfer_oob - [Internal] Transfer oob to client buffer * @chip: nand chip structure + * @oob: oob destination address * @ops: oob ops structure */ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, @@ -946,6 +945,7 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, * * @mtd: MTD device structure * @from: offset to read from + * @ops: oob ops structure * * Internal function. Called with chip held. 
*/ @@ -1760,7 +1760,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, /** * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band * @mtd: MTD device structure - * @from: offset to read from + * @to: offset to write to * @ops: oob operation description structure */ static int nand_write_oob(struct mtd_info *mtd, loff_t to, @@ -2055,7 +2055,7 @@ static void nand_sync(struct mtd_info *mtd) /** * nand_block_isbad - [MTD Interface] Check if block at offset is bad * @mtd: MTD device structure - * @ofs: offset relative to mtd start + * @offs: offset relative to mtd start */ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) { diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c index 2a163e4084df..dd438ca47d9a 100644 --- a/drivers/mtd/nand/nand_ecc.c +++ b/drivers/mtd/nand/nand_ecc.c @@ -65,8 +65,7 @@ static const u_char nand_ecc_precalc_table[] = { }; /** - * nand_calculate_ecc - [NAND Interface] Calculate 3 byte ECC code - * for 256 byte block + * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256-byte block * @mtd: MTD block structure * @dat: raw data * @ecc_code: buffer for ECC diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 66559272ebcb..2266f032a8c6 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -202,7 +202,7 @@ typedef enum { struct nand_chip; /** - * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independend devices + * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices * @lock: protection lock * @active: the mtd device which holds the controller currently * @wq: wait queue to sleep on if a NAND operation is in progress @@ -223,12 +223,15 @@ struct nand_hw_control { * @total: total number of ecc bytes per page * @prepad: padding information for syndrome based ecc generators * @postpad: padding information for syndrome based ecc generators + * @layout: ECC layout control struct pointer * @hwctl: function to control hardware ecc generator. 
Must only * be provided if an hardware ECC is available * @calculate: function for ecc calculation or readback from ecc hardware * @correct: function for ecc correction, matching to ecc generator (sw/hw) * @read_page: function to read a page according to the ecc generator requirements * @write_page: function to write a page according to the ecc generator requirements + * @read_oob: function to read chip OOB data + * @write_oob: function to write chip OOB data */ struct nand_ecc_ctrl { nand_ecc_modes_t mode; @@ -300,11 +303,15 @@ struct nand_buffers { * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing commands to the chip * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on ready * @ecc: [BOARDSPECIFIC] ecc control ctructure + * @buffers: buffer structure for read/write + * @hwcontrol: platform-specific hardware control structure + * @ops: oob operation operands * @erase_cmd: [INTERN] erase command write function, selectable due to AND support * @scan_bbt: [REPLACEABLE] function to scan bad block table * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transfering data from array to read regs (tR) * @wq: [INTERN] wait queue to sleep on if a NAND operation is in progress * @state: [INTERN] the current state of the NAND device + * @oob_poi: poison value buffer * @page_shift: [INTERN] number of address bits in a page (column address bits) * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry @@ -521,7 +528,7 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len, * struct platform_nand_chip - chip level device structure * * @nr_chips: max. number of chips to scan for - * @chip_offs: chip number offset + * @chip_offset: chip number offset * @nr_partitions: number of partitions pointed to by partitions (or zero) * @partitions: mtd partition list * @chip_delay: R/B delay value in us @@ -546,7 +553,7 @@ struct platform_nand_chip { * @hwcontrol: platform specific hardware control structure * @dev_ready: platform specific function to read ready/busy pin * @select_chip: platform specific chip select function - * @priv_data: private data to transport driver specific settings + * @priv: private data to transport driver specific settings * * All fields are optional and depend on the hardware driver requirements */ -- cgit v1.2.3 From ea9b6dcc152f09c207117ab121d4fa03d2db282a Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 28 Jun 2006 21:48:38 -0700 Subject: MTD: kernel-doc fixes + additions Fix some kernel-doc typos/spellos. Use kernel-doc syntax in places where it was almost used. Correct/add struct, struct field, and function param names where needed. Signed-off-by: Randy Dunlap Signed-off-by: David Woodhouse --- Documentation/DocBook/mtdnand.tmpl | 7 ++-- include/linux/mtd/bbm.h | 35 +++++++++-------- include/linux/mtd/mtd.h | 4 +- include/linux/mtd/nand.h | 3 -- include/linux/mtd/onenand.h | 77 ++++++++++++++++++++++---------------- include/mtd/mtd-abi.h | 2 +- 6 files changed, 69 insertions(+), 59 deletions(-) (limited to 'include/linux') diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl index 32f385605981..630159c5bdba 100644 --- a/Documentation/DocBook/mtdnand.tmpl +++ b/Documentation/DocBook/mtdnand.tmpl @@ -109,7 +109,7 @@ for most of the implementations. These functions can be replaced by the board driver if neccecary. Those functions are called via pointers in the NAND chip description structure. 
The board driver can set the functions which - should be replaced by board dependend functions before calling nand_scan(). + should be replaced by board dependent functions before calling nand_scan(). If the function pointer is NULL on entry to nand_scan() then the pointer is set to the default function which is suitable for the detected chip type. @@ -133,7 +133,7 @@ [REPLACEABLE] Replaceable members hold hardware related functions which can be provided by the board driver. The board driver can set the functions which - should be replaced by board dependend functions before calling nand_scan(). + should be replaced by board dependent functions before calling nand_scan(). If the function pointer is NULL on entry to nand_scan() then the pointer is set to the default function which is suitable for the detected chip type. @@ -156,9 +156,8 @@ Basic board driver For most boards it will be sufficient to provide just the - basic functions and fill out some really board dependend + basic functions and fill out some really board dependent members in the nand chip description structure. - See drivers/mtd/nand/skeleton for reference. Basic defines diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h index 7a7fbe87fef0..1221b7c44158 100644 --- a/include/linux/mtd/bbm.h +++ b/include/linux/mtd/bbm.h @@ -19,21 +19,21 @@ /** * struct nand_bbt_descr - bad block table descriptor - * @param options options for this descriptor - * @param pages the page(s) where we find the bbt, used with + * @options: options for this descriptor + * @pages: the page(s) where we find the bbt, used with * option BBT_ABSPAGE when bbt is searched, * then we store the found bbts pages here. * Its an array and supports up to 8 chips now - * @param offs offset of the pattern in the oob area of the page - * @param veroffs offset of the bbt version counter in the oob are of the page - * @param version version read from the bbt page during scan - * @param len length of the pattern, if 0 no pattern check is performed - * @param maxblocks maximum number of blocks to search for a bbt. This number of - * blocks is reserved at the end of the device + * @offs: offset of the pattern in the oob area of the page + * @veroffs: offset of the bbt version counter in the oob area of the page + * @version: version read from the bbt page during scan + * @len: length of the pattern, if 0 no pattern check is performed + * @maxblocks: maximum number of blocks to search for a bbt. This + * number of blocks is reserved at the end of the device * where the tables are written. 
- * @param reserved_block_code if non-0, this pattern denotes a reserved + * @reserved_block_code: if non-0, this pattern denotes a reserved * (rather than bad) block in the stored bbt - * @param pattern pattern to identify bad block table or factory marked + * @pattern: pattern to identify bad block table or factory marked * good / bad blocks, can be NULL, if len = 0 * * Descriptor for the bad block table marker and the descriptor for the @@ -93,12 +93,15 @@ struct nand_bbt_descr { #define ONENAND_BADBLOCK_POS 0 /** - * struct bbt_info - [GENERIC] Bad Block Table data structure - * @param bbt_erase_shift [INTERN] number of address bits in a bbt entry - * @param badblockpos [INTERN] position of the bad block marker in the oob area - * @param bbt [INTERN] bad block table pointer - * @param badblock_pattern [REPLACEABLE] bad block scan pattern used for initial bad block scan - * @param priv [OPTIONAL] pointer to private bbm date + * struct bbm_info - [GENERIC] Bad Block Table data structure + * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry + * @badblockpos: [INTERN] position of the bad block marker in the oob area + * @options: options for this descriptor + * @bbt: [INTERN] bad block table pointer + * @isbad_bbt: function to determine if a block is bad + * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for + * initial bad block scan + * @priv: [OPTIONAL] pointer to private bbm date */ struct bbm_info { int bbt_erase_shift; diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 9b7a2b525d63..94a443d45258 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -77,11 +77,11 @@ typedef enum { * * @len: number of bytes to write/read. When a data buffer is given * (datbuf != NULL) this is the number of data bytes. When - + no data buffer is available this is the number of oob bytes. + * no data buffer is available this is the number of oob bytes. * * @retlen: number of bytes written/read. When a data buffer is given * (datbuf != NULL) this is the number of data bytes. When - + no data buffer is available this is the number of oob bytes. + * no data buffer is available this is the number of oob bytes. * * @ooblen: number of oob bytes per page * @ooboffs: offset of oob data in the oob area (only relevant when diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 2266f032a8c6..0b4cd2fa64aa 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -407,7 +407,6 @@ struct nand_chip { /** * struct nand_flash_dev - NAND Flash Device ID Structure - * * @name: Identify the device type * @id: device ID code * @pagesize: Pagesize in bytes. Either 256 or 512 or 0 @@ -526,7 +525,6 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len, /** * struct platform_nand_chip - chip level device structure - * * @nr_chips: max. 
number of chips to scan for * @chip_offset: chip number offset * @nr_partitions: number of partitions pointed to by partitions (or zero) @@ -549,7 +547,6 @@ struct platform_nand_chip { /** * struct platform_nand_ctrl - controller level device structure - * * @hwcontrol: platform specific hardware control structure * @dev_ready: platform specific function to read ready/busy pin * @select_chip: platform specific chip select function diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h index 9ce9a48db444..1f4972155249 100644 --- a/include/linux/mtd/onenand.h +++ b/include/linux/mtd/onenand.h @@ -23,7 +23,7 @@ extern int onenand_scan(struct mtd_info *mtd, int max_chips); /* Free resources held by the OneNAND device */ extern void onenand_release(struct mtd_info *mtd); -/** +/* * onenand_state_t - chip states * Enumeration for OneNAND flash chip state */ @@ -42,9 +42,9 @@ typedef enum { /** * struct onenand_bufferram - OneNAND BufferRAM Data - * @param block block address in BufferRAM - * @param page page address in BufferRAM - * @param valid valid flag + * @block: block address in BufferRAM + * @page: page address in BufferRAM + * @valid: valid flag */ struct onenand_bufferram { int block; @@ -54,32 +54,43 @@ struct onenand_bufferram { /** * struct onenand_chip - OneNAND Private Flash Chip Data - * @param base [BOARDSPECIFIC] address to access OneNAND - * @param chipsize [INTERN] the size of one chip for multichip arrays - * @param device_id [INTERN] device ID - * @param verstion_id [INTERN] version ID - * @param options [BOARDSPECIFIC] various chip options. They can partly be set to inform onenand_scan about - * @param erase_shift [INTERN] number of address bits in a block - * @param page_shift [INTERN] number of address bits in a page - * @param ppb_shift [INTERN] number of address bits in a pages per block - * @param page_mask [INTERN] a page per block mask - * @param bufferam_index [INTERN] BufferRAM index - * @param bufferam [INTERN] BufferRAM info - * @param readw [REPLACEABLE] hardware specific function for read short - * @param writew [REPLACEABLE] hardware specific function for write short - * @param command [REPLACEABLE] hardware specific function for writing commands to the chip - * @param wait [REPLACEABLE] hardware specific function for wait on ready - * @param read_bufferram [REPLACEABLE] hardware specific function for BufferRAM Area - * @param write_bufferram [REPLACEABLE] hardware specific function for BufferRAM Area - * @param read_word [REPLACEABLE] hardware specific function for read register of OneNAND - * @param write_word [REPLACEABLE] hardware specific function for write register of OneNAND - * @param scan_bbt [REPLACEALBE] hardware specific function for scaning Bad block Table - * @param chip_lock [INTERN] spinlock used to protect access to this structure and the chip - * @param wq [INTERN] wait queue to sleep on if a OneNAND operation is in progress - * @param state [INTERN] the current state of the OneNAND device - * @param ecclayout [REPLACEABLE] the default ecc placement scheme - * @param bbm [REPLACEABLE] pointer to Bad Block Management - * @param priv [OPTIONAL] pointer to private chip date + * @base: [BOARDSPECIFIC] address to access OneNAND + * @chipsize: [INTERN] the size of one chip for multichip arrays + * @device_id: [INTERN] device ID + * @density_mask: chip density, used for DDP devices + * @verstion_id: [INTERN] version ID + * @options: [BOARDSPECIFIC] various chip options. 
They can + * partly be set to inform onenand_scan about + * @erase_shift: [INTERN] number of address bits in a block + * @page_shift: [INTERN] number of address bits in a page + * @ppb_shift: [INTERN] number of address bits in a pages per block + * @page_mask: [INTERN] a page per block mask + * @bufferram_index: [INTERN] BufferRAM index + * @bufferram: [INTERN] BufferRAM info + * @readw: [REPLACEABLE] hardware specific function for read short + * @writew: [REPLACEABLE] hardware specific function for write short + * @command: [REPLACEABLE] hardware specific function for writing + * commands to the chip + * @wait: [REPLACEABLE] hardware specific function for wait on ready + * @read_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area + * @write_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area + * @read_word: [REPLACEABLE] hardware specific function for read + * register of OneNAND + * @write_word: [REPLACEABLE] hardware specific function for write + * register of OneNAND + * @mmcontrol: sync burst read function + * @block_markbad: function to mark a block as bad + * @scan_bbt: [REPLACEALBE] hardware specific function for scanning + * Bad block Table + * @chip_lock: [INTERN] spinlock used to protect access to this + * structure and the chip + * @wq: [INTERN] wait queue to sleep on if a OneNAND + * operation is in progress + * @state: [INTERN] the current state of the OneNAND device + * @page_buf: data buffer + * @ecclayout: [REPLACEABLE] the default ecc placement scheme + * @bbm: [REPLACEABLE] pointer to Bad Block Management + * @priv: [OPTIONAL] pointer to private chip date */ struct onenand_chip { void __iomem *base; @@ -147,9 +158,9 @@ struct onenand_chip { #define ONENAND_MFR_SAMSUNG 0xec /** - * struct nand_manufacturers - NAND Flash Manufacturer ID Structure - * @param name: Manufacturer name - * @param id: manufacturer ID code of device. + * struct onenand_manufacturers - NAND Flash Manufacturer ID Structure + * @name: Manufacturer name + * @id: manufacturer ID code of device. */ struct onenand_manufacturers { int id; diff --git a/include/mtd/mtd-abi.h b/include/mtd/mtd-abi.h index 31329fce1ff5..1da3f7fa7993 100644 --- a/include/mtd/mtd-abi.h +++ b/include/mtd/mtd-abi.h @@ -133,7 +133,7 @@ struct nand_ecclayout { }; /** - * struct mtd_ecc_stats - error correction status + * struct mtd_ecc_stats - error correction stats * * @corrected: number of corrected bits * @failed: number of uncorrectable errors -- cgit v1.2.3 From 257a5bdeb0441789d8e34e1b3e92b26d0f51bbf0 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 29 Jun 2006 22:40:06 +0100 Subject: Remove export of include/linux/isdn/tpam.h Signed-off-by: David Woodhouse --- include/linux/isdn/Kbuild | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/isdn/Kbuild b/include/linux/isdn/Kbuild index c1727c893004..991cdb29ab2e 100644 --- a/include/linux/isdn/Kbuild +++ b/include/linux/isdn/Kbuild @@ -1 +1 @@ -header-y += capicmd.h tpam.h +header-y += capicmd.h -- cgit v1.2.3 From 7a6bc1cdd506cf81f856f0fef4e56a2ba0c5a26d Mon Sep 17 00:00:00 2001 From: Venkatesh Pallipadi Date: Wed, 28 Jun 2006 13:50:33 -0700 Subject: [CPUFREQ] Add queue_delayed_work_on() interface for workqueues. Add queue_delayed_work_on() interface for workqueues. 
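As an illustration only (the workqueue name, callback and CPU choice below are not part of this patch), a driver built on this interface could arm per-CPU delayed work roughly as follows, using the three-argument INIT_WORK() of this kernel generation:

    #include <linux/module.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    static struct workqueue_struct *poll_wq;    /* illustrative driver-private queue */
    static struct work_struct poll_work;

    static void poll_fn(void *data)
    {
            /* periodic work; the timer below was armed on the chosen CPU */
    }

    static int __init poll_init(void)
    {
            poll_wq = create_workqueue("poll_wq");
            if (!poll_wq)
                    return -ENOMEM;
            INIT_WORK(&poll_work, poll_fn, NULL);
            /* queue the delayed work on CPU 0, one second from now */
            queue_delayed_work_on(0, poll_wq, &poll_work, HZ);
            return 0;
    }
    module_init(poll_init);
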
Signed-off-by: Alexey Starikovskiy Signed-off-by: Venkatesh Pallipadi Signed-off-by: Dave Jones --- include/linux/workqueue.h | 2 ++ kernel/workqueue.c | 38 +++++++++++++++++++++++--------------- 2 files changed, 25 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 957c21c16d62..9bca3539a1e5 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -63,6 +63,8 @@ extern void destroy_workqueue(struct workqueue_struct *wq); extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work)); extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay)); +extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, + struct work_struct *work, unsigned long delay); extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq)); extern int FASTCALL(schedule_work(struct work_struct *work)); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 59f0b42bd89e..8fbef7008a7e 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -148,6 +148,27 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq, return ret; } +int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, + struct work_struct *work, unsigned long delay) +{ + int ret = 0; + struct timer_list *timer = &work->timer; + + if (!test_and_set_bit(0, &work->pending)) { + BUG_ON(timer_pending(timer)); + BUG_ON(!list_empty(&work->entry)); + + /* This stores wq for the moment, for the timer_fn */ + work->wq_data = wq; + timer->expires = jiffies + delay; + timer->data = (unsigned long)work; + timer->function = delayed_work_timer_fn; + add_timer_on(timer, cpu); + ret = 1; + } + return ret; +} + static void run_workqueue(struct cpu_workqueue_struct *cwq) { unsigned long flags; @@ -411,21 +432,7 @@ int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay) { - int ret = 0; - struct timer_list *timer = &work->timer; - - if (!test_and_set_bit(0, &work->pending)) { - BUG_ON(timer_pending(timer)); - BUG_ON(!list_empty(&work->entry)); - /* This stores keventd_wq for the moment, for the timer_fn */ - work->wq_data = keventd_wq; - timer->expires = jiffies + delay; - timer->data = (unsigned long)work; - timer->function = delayed_work_timer_fn; - add_timer_on(timer, cpu); - ret = 1; - } - return ret; + return queue_delayed_work_on(cpu, keventd_wq, work, delay); } /** @@ -622,6 +629,7 @@ void init_workqueues(void) EXPORT_SYMBOL_GPL(__create_workqueue); EXPORT_SYMBOL_GPL(queue_work); EXPORT_SYMBOL_GPL(queue_delayed_work); +EXPORT_SYMBOL_GPL(queue_delayed_work_on); EXPORT_SYMBOL_GPL(flush_workqueue); EXPORT_SYMBOL_GPL(destroy_workqueue); -- cgit v1.2.3 From 947deee8904b3c2edc7f59ab6e6242499e4dc434 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 2 Jul 2006 20:45:51 +0100 Subject: [SERIAL] Convert fifosize to an unsigned int Some UARTs have more than 255 bytes of FIFO, which can't be represented by an unsigned char. Change the kernel's internal structure to be an unsigned int, but still export an unsigned char via the TIOCGSERIAL ioctl. If the TIOCSSERIAL ioctl provides a fifo size of 0, assume this means "don't change" otherwise we'll corrupt the larger fifo sizes. 
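For illustration, a userspace caller might follow the new convention like the sketch below; set_port_close_delay() and its error handling are hypothetical, but zeroing xmit_fifo_size before TIOCSSERIAL is the documented way to leave the (possibly larger than 255 byte) fifo size untouched:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/serial.h>

    /* hypothetical helper, not part of this patch */
    static int set_port_close_delay(const char *dev, unsigned short delay)
    {
            struct serial_struct ss;
            int ret = -1;
            int fd = open(dev, O_RDWR | O_NOCTTY);

            if (fd < 0)
                    return -1;
            if (ioctl(fd, TIOCGSERIAL, &ss) == 0) {
                    ss.close_delay = delay;
                    ss.xmit_fifo_size = 0;  /* 0 now means "keep the driver's fifo size" */
                    ret = ioctl(fd, TIOCSSERIAL, &ss);
            }
            close(fd);
            return ret;
    }
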
Signed-off-by: Russell King --- drivers/serial/mpc52xx_uart.c | 3 +-- drivers/serial/serial_core.c | 6 ++++-- include/linux/serial_core.h | 3 ++- 3 files changed, 7 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c index 6459edc7f5c5..1b2cdc91c228 100644 --- a/drivers/serial/mpc52xx_uart.c +++ b/drivers/serial/mpc52xx_uart.c @@ -728,8 +728,7 @@ mpc52xx_uart_probe(struct platform_device *dev) spin_lock_init(&port->lock); port->uartclk = __res.bi_ipbfreq / 2; /* Look at CTLR doc */ - port->fifosize = 255; /* Should be 512 ! But it can't be */ - /* stored in a unsigned char */ + port->fifosize = 512; port->iotype = UPIO_MEM; port->flags = UPF_BOOT_AUTOCONF | ( uart_console(port) ? 0 : UPF_IOREMAP ); diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index 17839e753e4c..3805f467b2f5 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c @@ -691,7 +691,8 @@ static int uart_set_info(struct uart_state *state, (new_serial.baud_base != port->uartclk / 16) || (close_delay != state->close_delay) || (closing_wait != state->closing_wait) || - (new_serial.xmit_fifo_size != port->fifosize) || + (new_serial.xmit_fifo_size && + new_serial.xmit_fifo_size != port->fifosize) || (((new_flags ^ old_flags) & ~UPF_USR_MASK) != 0)) goto exit; port->flags = ((port->flags & ~UPF_USR_MASK) | @@ -796,7 +797,8 @@ static int uart_set_info(struct uart_state *state, port->custom_divisor = new_serial.custom_divisor; state->close_delay = close_delay; state->closing_wait = closing_wait; - port->fifosize = new_serial.xmit_fifo_size; + if (new_serial.xmit_fifo_size) + port->fifosize = new_serial.xmit_fifo_size; if (state->info->tty) state->info->tty->low_latency = (port->flags & UPF_LOW_LATENCY) ? 1 : 0; diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index bd14858121ea..966e2e8a1740 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -214,10 +214,11 @@ struct uart_port { unsigned char __iomem *membase; /* read/write[bwl] */ unsigned int irq; /* irq number */ unsigned int uartclk; /* base uart clock */ - unsigned char fifosize; /* tx fifo size */ + unsigned int fifosize; /* tx fifo size */ unsigned char x_char; /* xon/xoff char */ unsigned char regshift; /* reg offset shift */ unsigned char iotype; /* io access style */ + unsigned char unused1; #define UPIO_PORT (0) #define UPIO_HUB6 (1) -- cgit v1.2.3 From 63104eec234bdecb55fd9c15467ae00d0a3f42ac Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Mon, 3 Jul 2006 23:30:54 +0200 Subject: kbuild: introduce utsrelease.h include/linux/version.h contained both actual KERNEL version and UTS_RELEASE that contains a subset from git SHA1 for when kernel was compiled as part of a git repository. This had the unfortunate side-effect that all files including version.h would be recompiled when some git changes was made due to changes SHA1. Split it out so we keep independent parts in separate files. Also update checkversion.pl script to no longer check for UTS_RELEASE. 
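A rough sketch of the intended usage split (the function and message are illustrative): code that only needs version checks keeps including <linux/version.h>, while the few places that print the release string switch to <linux/utsrelease.h> and are the only ones rebuilt when the git SHA1 part of KERNELRELEASE changes:

    #include <linux/kernel.h>
    #include <linux/version.h>      /* LINUX_VERSION_CODE, KERNEL_VERSION() */
    #include <linux/utsrelease.h>   /* UTS_RELEASE, e.g. "2.6.18-rc1" */

    static void report_version(void)
    {
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
            printk(KERN_INFO "built against %s\n", UTS_RELEASE);
    #endif
    }
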
Signed-off-by: Sam Ravnborg --- Makefile | 30 +++++++++++++++----------- arch/alpha/boot/bootp.c | 2 +- arch/alpha/boot/bootpz.c | 2 +- arch/alpha/boot/main.c | 2 +- arch/frv/kernel/setup.c | 2 +- arch/i386/boot/setup.S | 2 +- arch/powerpc/platforms/chrp/setup.c | 2 +- arch/powerpc/platforms/powermac/bootx_init.c | 2 +- arch/ppc/syslib/btext.c | 2 +- arch/x86_64/boot/setup.S | 2 +- drivers/net/wireless/bcm43xx/bcm43xx_ethtool.c | 2 +- include/linux/vermagic.h | 2 +- init/version.c | 1 + scripts/checkversion.pl | 7 +++--- 14 files changed, 33 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/Makefile b/Makefile index 1cc793ff42d4..b36aeb617bd2 100644 --- a/Makefile +++ b/Makefile @@ -812,8 +812,8 @@ endif # prepare2 creates a makefile if using a separate output directory prepare2: prepare3 outputmakefile -prepare1: prepare2 include/linux/version.h include/asm \ - include/config/auto.conf +prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \ + include/asm include/config/auto.conf ifneq ($(KBUILD_MODULES),) $(Q)mkdir -p $(MODVERDIR) $(Q)rm -f $(MODVERDIR)/* @@ -848,21 +848,26 @@ include/asm: # needs to be updated, so this check is forced on all builds uts_len := 64 +define filechk_utsrelease.h + if [ `echo -n "$(KERNELRELEASE)" | wc -c ` -gt $(uts_len) ]; then \ + echo '"$(KERNELRELEASE)" exceeds $(uts_len) characters' >&2; \ + exit 1; \ + fi; \ + (echo \#define UTS_RELEASE \"$(KERNELRELEASE)\";) +endef define filechk_version.h - if [ `echo -n "$(KERNELRELEASE)" | wc -c ` -gt $(uts_len) ]; then \ - echo '"$(KERNELRELEASE)" exceeds $(uts_len) characters' >&2; \ - exit 1; \ - fi; \ - (echo \#define UTS_RELEASE \"$(KERNELRELEASE)\"; \ - echo \#define LINUX_VERSION_CODE `expr $(VERSION) \\* 65536 + $(PATCHLEVEL) \\* 256 + $(SUBLEVEL)`; \ - echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))'; \ - ) + (echo \#define LINUX_VERSION_CODE $(shell \ + expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + $(SUBLEVEL)); \ + echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';) endef -include/linux/version.h: $(srctree)/Makefile include/config/kernel.release FORCE +include/linux/version.h: $(srctree)/Makefile FORCE $(call filechk,version.h) +include/linux/utsrelease.h: include/config/kernel.release FORCE + $(call filechk,utsrelease.h) + # --------------------------------------------------------------------------- PHONY += depend dep @@ -955,7 +960,8 @@ CLEAN_FILES += vmlinux System.map \ # Directories & files removed with 'make mrproper' MRPROPER_DIRS += include/config include2 MRPROPER_FILES += .config .config.old include/asm .version .old_version \ - include/linux/autoconf.h include/linux/version.h \ + include/linux/autoconf.h include/linux/version.h \ + include/linux/utsrelease.h \ Module.symvers tags TAGS cscope* # clean - Delete most, but leave enough to build external modules diff --git a/arch/alpha/boot/bootp.c b/arch/alpha/boot/bootp.c index ec53c28e33de..3af21c789339 100644 --- a/arch/alpha/boot/bootp.c +++ b/arch/alpha/boot/bootp.c @@ -9,7 +9,7 @@ */ #include #include -#include +#include #include #include diff --git a/arch/alpha/boot/bootpz.c b/arch/alpha/boot/bootpz.c index a6657f2cf9bd..4307bde80a35 100644 --- a/arch/alpha/boot/bootpz.c +++ b/arch/alpha/boot/bootpz.c @@ -11,7 +11,7 @@ */ #include #include -#include +#include #include #include diff --git a/arch/alpha/boot/main.c b/arch/alpha/boot/main.c index 78c9b0b6eea7..90ed55b662a8 100644 --- a/arch/alpha/boot/main.c +++ b/arch/alpha/boot/main.c @@ -7,7 +7,7 @@ */ 
#include #include -#include +#include #include #include diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c index 5db3d4eff909..af08ccd4ed6e 100644 --- a/arch/frv/kernel/setup.c +++ b/arch/frv/kernel/setup.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. */ -#include +#include #include #include #include diff --git a/arch/i386/boot/setup.S b/arch/i386/boot/setup.S index 0a5a3be6d69c..d2b684cd620a 100644 --- a/arch/i386/boot/setup.S +++ b/arch/i386/boot/setup.S @@ -47,7 +47,7 @@ */ #include -#include +#include #include #include #include diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 1f1771b212b4..9df9f2079e9b 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c index cb257aeb91f6..24f09e2a5775 100644 --- a/arch/powerpc/platforms/powermac/bootx_init.c +++ b/arch/powerpc/platforms/powermac/bootx_init.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/ppc/syslib/btext.c b/arch/ppc/syslib/btext.c index 51ab6e90fe25..d11667046f21 100644 --- a/arch/ppc/syslib/btext.c +++ b/arch/ppc/syslib/btext.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/x86_64/boot/setup.S b/arch/x86_64/boot/setup.S index 7de8b8fd1685..a50b631f4d2b 100644 --- a/arch/x86_64/boot/setup.S +++ b/arch/x86_64/boot/setup.S @@ -46,7 +46,7 @@ */ #include -#include +#include #include #include #include diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_ethtool.c b/drivers/net/wireless/bcm43xx/bcm43xx_ethtool.c index b3ffcf501311..e386dcc32e8c 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_ethtool.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_ethtool.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include static void bcm43xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h index dc7c621e4647..46919f9f5eb3 100644 --- a/include/linux/vermagic.h +++ b/include/linux/vermagic.h @@ -1,4 +1,4 @@ -#include +#include #include /* Simply sanity version stamp for modules. */ diff --git a/init/version.c b/init/version.c index 3ddc3ceec2fe..e290802c6bd2 100644 --- a/init/version.c +++ b/init/version.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #define version(a) Version_ ## a diff --git a/scripts/checkversion.pl b/scripts/checkversion.pl index 9f84e562318d..ec7d21161bdc 100755 --- a/scripts/checkversion.pl +++ b/scripts/checkversion.pl @@ -1,7 +1,7 @@ #! /usr/bin/perl # -# checkversion find uses of LINUX_VERSION_CODE, KERNEL_VERSION, or -# UTS_RELEASE without including , or cases of +# checkversion find uses of LINUX_VERSION_CODE or KERNEL_VERSION +# without including , or cases of # including that don't need it. 
# Copyright (C) 2003, Randy Dunlap @@ -41,8 +41,7 @@ foreach $file (@ARGV) } # Look for uses: LINUX_VERSION_CODE, KERNEL_VERSION, UTS_RELEASE - if (($_ =~ /LINUX_VERSION_CODE/) || ($_ =~ /\WKERNEL_VERSION/) || - ($_ =~ /UTS_RELEASE/)) { + if (($_ =~ /LINUX_VERSION_CODE/) || ($_ =~ /\WKERNEL_VERSION/)) { $fUseVersion = 1; last LINE if $iLinuxVersion; } -- cgit v1.2.3 From b02454f43578b24bc8b8ab54a239156841f56f6d Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 3 Jul 2006 00:24:06 -0700 Subject: [PATCH] lockdep: special s390 print_symbol() version Have a special version of print_symbol() for s390 which clears the most significant bit of addr before calling __print_symbol(). This seems to be better than checking/changing each place in the kernel that saves an instruction pointer. Without this the output would look like: hardirqs last enabled at (30907): [<80018c6a>] 0x80018c6a hardirqs last disabled at (30908): [<8001e48c>] 0x8001e48c softirqs last enabled at (30904): [<8001dc96>] 0x8001dc96 softirqs last disabled at (30897): [<8001dc50>] 0x8001dc50 instead of this: hardirqs last enabled at (19421): [<80018c72>] cpu_idle+0x176/0x1c4 hardirqs last disabled at (19422): [<8001e494>] io_no_vtime+0xa/0x1a softirqs last enabled at (19418): [<8001dc9e>] do_softirq+0xa6/0xe8 softirqs last disabled at (19411): [<8001dc58>] do_softirq+0x60/0xe8 Acked-by: Ingo Molnar Cc: Arjan van de Ven Cc: Martin Schwidefsky Signed-off-by: Heiko Carstens Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kallsyms.h | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 54e2549f96ba..ad71ac053d6e 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -57,10 +57,11 @@ do { \ #define print_fn_descriptor_symbol(fmt, addr) print_symbol(fmt, addr) #endif -#define print_symbol(fmt, addr) \ -do { \ - __check_printsym_format(fmt, ""); \ - __print_symbol(fmt, addr); \ -} while(0) +static inline void print_symbol(const char *fmt, unsigned long addr) +{ + __check_printsym_format(fmt, ""); + __print_symbol(fmt, (unsigned long) + __builtin_extract_return_addr((void *)addr)); +} #endif /*_LINUX_KALLSYMS_H*/ -- cgit v1.2.3 From c32928c579d88acd43981b59e86900da65f40762 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Mon, 3 Jul 2006 00:24:10 -0700 Subject: [PATCH] PNPACPI: support shareable interrupts ACPI supplies a "shareable" indication, but PNPACPI ignores it. If a PNP device uses a shared interrupt, request_irq() fails because the PNP driver can't tell whether to supply SA_SHIRQ. 
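For reference, a typical call site looks like the sketch below (the function and message text are illustrative); the caller passes the saved instruction pointer unmodified, and the new inline strips the s390 31-bit mode bit via __builtin_extract_return_addr() before doing the symbol lookup:

    #include <linux/kernel.h>
    #include <linux/kallsyms.h>

    static void show_last_enable(unsigned long ip)  /* ip as saved by lockdep */
    {
            printk(KERN_DEBUG "hardirqs last enabled at: ");
            print_symbol("%s\n", ip);
    }
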
This patch allows PNP drivers to test (pnp_irq_flags(dev, 0) & IORESOURCE_IRQ_SHAREABLE) Signed-off-by: Bjorn Helgaas Cc: Adam Belay Cc: Matthieu Castet Cc: Li Shaohua Cc: Len Brown Cc: Russell King Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/pnp/pnpacpi/rsparser.c | 11 ++++++++--- include/linux/ioport.h | 1 + 2 files changed, 9 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 3a4a644c2686..212268881857 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c @@ -74,7 +74,7 @@ static void decode_irq_flags(int flag, int *triggering, int *polarity) static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res, u32 gsi, - int triggering, int polarity) + int triggering, int polarity, int shareable) { int i = 0; int irq; @@ -95,6 +95,9 @@ pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res, u32 gsi, return; } + if (shareable) + res->irq_resource[i].flags |= IORESOURCE_IRQ_SHAREABLE; + res->irq_resource[i].start = irq; res->irq_resource[i].end = irq; pcibios_penalize_isa_irq(irq, 1); @@ -194,7 +197,8 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, pnpacpi_parse_allocated_irqresource(res_table, res->data.irq.interrupts[i], res->data.irq.triggering, - res->data.irq.polarity); + res->data.irq.polarity, + res->data.irq.sharable); } break; @@ -255,7 +259,8 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, pnpacpi_parse_allocated_irqresource(res_table, res->data.extended_irq.interrupts[i], res->data.extended_irq.triggering, - res->data.extended_irq.polarity); + res->data.extended_irq.polarity, + res->data.extended_irq.sharable); } break; diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 87a9fc039b47..5612dfeeae50 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -55,6 +55,7 @@ struct resource_list { #define IORESOURCE_IRQ_LOWEDGE (1<<1) #define IORESOURCE_IRQ_HIGHLEVEL (1<<2) #define IORESOURCE_IRQ_LOWLEVEL (1<<3) +#define IORESOURCE_IRQ_SHAREABLE (1<<4) /* ISA PnP DMA specific bits (IORESOURCE_BITS) */ #define IORESOURCE_DMA_TYPE_MASK (3<<0) -- cgit v1.2.3 From 9614634fe6a138fd8ae044950700d2af8d203f97 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Mon, 3 Jul 2006 00:24:13 -0700 Subject: [PATCH] ZVC/zone_reclaim: Leave 1% of unmapped pagecache pages for file I/O It turns out that it is advantageous to leave a small portion of unmapped file backed pages if all of a zone's pages (or almost all pages) are allocated and so the page allocator has to go off-node. This allows recently used file I/O buffers to stay on the node and reduces the times that zone reclaim is invoked if file I/O occurs when we run out of memory in a zone. The problem is that zone reclaim runs too frequently when the page cache is used for file I/O (read write and therefore unmapped pages!) alone and we have almost all pages of the zone allocated. Zone reclaim may remove 32 unmapped pages. File I/O will use these pages for the next read/write requests and the unmapped pages increase. After the zone has filled up again zone reclaim will remove it again after only 32 pages. This cycle is too inefficient and there are potentially too many zone reclaim cycles. With the 1% boundary we may still remove all unmapped pages for file I/O in zone reclaim pass. However. it will take a large number of read and writes to get back to 1% again where we trigger zone reclaim again. 
The zone reclaim 2.6.16/17 does not show this behavior because we have a 30 second timeout. [akpm@osdl.org: rename the /proc file and the variable] Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/sysctl/vm.txt | 14 ++++++++++++++ include/linux/mmzone.h | 6 ++++++ include/linux/swap.h | 1 + include/linux/sysctl.h | 2 +- kernel/sysctl.c | 11 +++++++++++ mm/page_alloc.c | 22 ++++++++++++++++++++++ mm/vmscan.c | 27 ++++++++++++++------------- 7 files changed, 69 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index 86754eb390da..7cee90223d3a 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -28,6 +28,7 @@ Currently, these files are in /proc/sys/vm: - block_dump - drop-caches - zone_reclaim_mode +- min_unmapped_ratio - panic_on_oom ============================================================== @@ -168,6 +169,19 @@ in all nodes of the system. ============================================================= +min_unmapped_ratio: + +This is available only on NUMA kernels. + +A percentage of the file backed pages in each zone. Zone reclaim will only +occur if more than this percentage of pages are file backed and unmapped. +This is to insure that a minimal amount of local pages is still available for +file I/O even if the node is overallocated. + +The default is 1 percent. + +============================================================= + panic_on_oom This enables or disables panic on out-of-memory feature. If this is set to 1, diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 27e748eb72b0..656b588a9f96 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -150,6 +150,10 @@ struct zone { unsigned long lowmem_reserve[MAX_NR_ZONES]; #ifdef CONFIG_NUMA + /* + * zone reclaim becomes active if more unmapped pages exist. + */ + unsigned long min_unmapped_ratio; struct per_cpu_pageset *pageset[NR_CPUS]; #else struct per_cpu_pageset pageset[NR_CPUS]; @@ -414,6 +418,8 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); +int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, + struct file *, void __user *, size_t *, loff_t *); #include /* Returns the number of the current Node. */ diff --git a/include/linux/swap.h b/include/linux/swap.h index cf6ca6e377bd..5e59184c9096 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -189,6 +189,7 @@ extern long vm_total_pages; #ifdef CONFIG_NUMA extern int zone_reclaim_mode; +extern int sysctl_min_unmapped_ratio; extern int zone_reclaim(struct zone *, gfp_t, unsigned int); #else #define zone_reclaim_mode 0 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 46e4d8f2771f..e4b1a4d4dcf3 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -188,7 +188,7 @@ enum VM_DROP_PAGECACHE=29, /* int: nuke lots of pagecache */ VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */ VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */ - VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */ + VM_MIN_UNMAPPED=32, /* Set min percent of unmapped pages */ VM_PANIC_ON_OOM=33, /* panic at out-of-memory */ VM_VDSO_ENABLED=34, /* map VDSO into new processes? 
*/ }; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 99a58f279077..362a0cc37138 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -932,6 +932,17 @@ static ctl_table vm_table[] = { .strategy = &sysctl_intvec, .extra1 = &zero, }, + { + .ctl_name = VM_MIN_UNMAPPED, + .procname = "min_unmapped_ratio", + .data = &sysctl_min_unmapped_ratio, + .maxlen = sizeof(sysctl_min_unmapped_ratio), + .mode = 0644, + .proc_handler = &sysctl_min_unmapped_ratio_sysctl_handler, + .strategy = &sysctl_intvec, + .extra1 = &zero, + .extra2 = &one_hundred, + }, #endif #ifdef CONFIG_X86_32 { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3e792a583f3b..54a4f5375bba 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2005,6 +2005,10 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat, zone->spanned_pages = size; zone->present_pages = realsize; +#ifdef CONFIG_NUMA + zone->min_unmapped_ratio = (realsize*sysctl_min_unmapped_ratio) + / 100; +#endif zone->name = zone_names[j]; spin_lock_init(&zone->lock); spin_lock_init(&zone->lru_lock); @@ -2298,6 +2302,24 @@ int min_free_kbytes_sysctl_handler(ctl_table *table, int write, return 0; } +#ifdef CONFIG_NUMA +int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write, + struct file *file, void __user *buffer, size_t *length, loff_t *ppos) +{ + struct zone *zone; + int rc; + + rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); + if (rc) + return rc; + + for_each_zone(zone) + zone->min_unmapped_ratio = (zone->present_pages * + sysctl_min_unmapped_ratio) / 100; + return 0; +} +#endif + /* * lowmem_reserve_ratio_sysctl_handler - just a wrapper around * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() diff --git a/mm/vmscan.c b/mm/vmscan.c index ff2ebe9458a3..5d4c4d02254d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1503,10 +1503,6 @@ module_init(kswapd_init) * * If non-zero call zone_reclaim when the number of free pages falls below * the watermarks. - * - * In the future we may add flags to the mode. However, the page allocator - * should only have to check that zone_reclaim_mode != 0 before calling - * zone_reclaim(). */ int zone_reclaim_mode __read_mostly; @@ -1523,6 +1519,12 @@ int zone_reclaim_mode __read_mostly; */ #define ZONE_RECLAIM_PRIORITY 4 +/* + * Percentage of pages in a zone that must be unmapped for zone_reclaim to + * occur. + */ +int sysctl_min_unmapped_ratio = 1; + /* * Try to free up some pages from this zone through reclaim. */ @@ -1590,18 +1592,17 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) int node_id; /* - * Do not reclaim if there are not enough reclaimable pages in this - * zone that would satify this allocations. + * Zone reclaim reclaims unmapped file backed pages. * - * All unmapped pagecache pages are reclaimable. - * - * Both counters may be temporarily off a bit so we use - * SWAP_CLUSTER_MAX as the boundary. It may also be good to - * leave a few frequently used unmapped pagecache pages around. + * A small portion of unmapped file backed pages is needed for + * file I/O otherwise pages read by file I/O will be immediately + * thrown out if the zone is overallocated. So we do not reclaim + * if less than a specified percentage of the zone is used by + * unmapped file backed pages. 
*/ if (zone_page_state(zone, NR_FILE_PAGES) - - zone_page_state(zone, NR_FILE_MAPPED) < SWAP_CLUSTER_MAX) - return 0; + zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_ratio) + return 0; /* * Avoid concurrent zone reclaims, do not reclaim in a zone that does -- cgit v1.2.3 From 4d435f9d8ff01ae726a2a84edb9c2457787a337e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:24:24 -0700 Subject: [PATCH] lockdep: add is_module_address() Add is_module_address() method - to be used by lockdep. Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Cc: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/module.h | 6 ++++++ kernel/module.c | 23 +++++++++++++++++++++++ 2 files changed, 29 insertions(+) (limited to 'include/linux') diff --git a/include/linux/module.h b/include/linux/module.h index 9e9dc7c24d95..d06c74fb8c26 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -358,6 +358,7 @@ static inline int module_is_live(struct module *mod) /* Is this address in a module? (second is with no locks, for oops) */ struct module *module_text_address(unsigned long addr); struct module *__module_text_address(unsigned long addr); +int is_module_address(unsigned long addr); /* Returns module and fills in value, defined and namebuf, or NULL if symnum out of range. */ @@ -496,6 +497,11 @@ static inline struct module *__module_text_address(unsigned long addr) return NULL; } +static inline int is_module_address(unsigned long addr) +{ + return 0; +} + /* Get/put a kernel symbol (calls should be symmetric) */ #define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); }) #define symbol_put(x) do { } while(0) diff --git a/kernel/module.c b/kernel/module.c index 281172f01e9a..0351625767b1 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2159,6 +2159,29 @@ const struct exception_table_entry *search_module_extables(unsigned long addr) return e; } +/* + * Is this a valid module address? + */ +int is_module_address(unsigned long addr) +{ + unsigned long flags; + struct module *mod; + + spin_lock_irqsave(&modlist_lock, flags); + + list_for_each_entry(mod, &modules, list) { + if (within(addr, mod->module_core, mod->core_size)) { + spin_unlock_irqrestore(&modlist_lock, flags); + return 1; + } + } + + spin_unlock_irqrestore(&modlist_lock, flags); + + return 0; +} + + /* Is this a valid kernel address? We don't grab the lock: we are oopsing. */ struct module *__module_text_address(unsigned long addr) { -- cgit v1.2.3 From 8d8fdf5c76816e5263073008f03f097ffc713db3 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 3 Jul 2006 00:24:25 -0700 Subject: [PATCH] lockdep: add print_ip_sym() Provide a common print_ip_sym() function that prints the passed instruction pointer as well as the symbol belonging to it. Avoids adding a bunch of #ifdef CONFIG_64BIT in order to get the printk format right on 32/64 bit platforms. 
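A minimal usage sketch (editorial illustration, not taken from the patch; report_caller() and the message text are hypothetical):

	#include <linux/kernel.h>
	#include <linux/kallsyms.h>

	static void report_caller(void)
	{
		/* any instruction pointer works; here: our own return address */
		unsigned long ip = (unsigned long)__builtin_return_address(0);

		printk(KERN_DEBUG "entered from: ");
		print_ip_sym(ip);	/* e.g. "[<c0123456>] some_function+0x12/0x40" */
	}

The caller never has to pick between %08lx and %016lx; the macro selects the right width for the platform.
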
Acked-by: Ingo Molnar Cc: Arjan van de Ven Signed-off-by: Heiko Carstens Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kallsyms.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'include/linux') diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index ad71ac053d6e..849043ce4ed6 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -64,4 +64,18 @@ static inline void print_symbol(const char *fmt, unsigned long addr) __builtin_extract_return_addr((void *)addr)); } +#ifndef CONFIG_64BIT +#define print_ip_sym(ip) \ +do { \ + printk("[<%08lx>]", ip); \ + print_symbol(" %s\n", ip); \ +} while(0) +#else +#define print_ip_sym(ip) \ +do { \ + printk("[<%016lx>]", ip); \ + print_symbol(" %s\n", ip); \ +} while(0) +#endif + #endif /*_LINUX_KALLSYMS_H*/ -- cgit v1.2.3 From c01d403b2e3e3f231b18ebd07ad64ecbe6a258a5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:24:27 -0700 Subject: [PATCH] lockdep: add disable/enable_irq_lockdep() API lockdep wants to use the disable_irq()/enable_irq() prototypes before they are provied by the platform's asm/irq.h. So move them out of the CONFIG_GENERIC_HARDIRQS define - all architectures have a common prototype for this anyway. Add special lockdep variants of irq line disabling/enabling. These should be used for locking constructs that know that a particular irq context which is disabled, and which is the only irq-context user of a lock, that it's safe to take the lock in the irq-disabled section without disabling hardirqs. [akpm@osdl.org: build fix] Signed-off-by: Ingo Molnar Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/interrupt.h | 49 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 48 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index da3e0dbe61d4..2c5452c1d7bb 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -86,6 +86,41 @@ extern void disable_irq_nosync(unsigned int irq); extern void disable_irq(unsigned int irq); extern void enable_irq(unsigned int irq); +/* + * Special lockdep variants of irq disabling/enabling. + * These should be used for locking constructs that + * know that a particular irq context which is disabled, + * and which is the only irq-context user of a lock, + * that it's safe to take the lock in the irq-disabled + * section without disabling hardirqs. + * + * On !CONFIG_LOCKDEP they are equivalent to the normal + * irq disable/enable methods. + */ +static inline void disable_irq_nosync_lockdep(unsigned int irq) +{ + disable_irq_nosync(irq); +#ifdef CONFIG_LOCKDEP + local_irq_disable(); +#endif +} + +static inline void disable_irq_lockdep(unsigned int irq) +{ + disable_irq(irq); +#ifdef CONFIG_LOCKDEP + local_irq_disable(); +#endif +} + +static inline void enable_irq_lockdep(unsigned int irq) +{ +#ifdef CONFIG_LOCKDEP + local_irq_enable(); +#endif + enable_irq(irq); +} + /* IRQ wakeup (PM) control: */ extern int set_irq_wake(unsigned int irq, unsigned int on); @@ -99,7 +134,19 @@ static inline int disable_irq_wake(unsigned int irq) return set_irq_wake(irq, 0); } -#endif +#else /* !CONFIG_GENERIC_HARDIRQS */ +/* + * NOTE: non-genirq architectures, if they want to support the lock + * validator need to define the methods below in their asm/irq.h + * files, under an #ifdef CONFIG_LOCKDEP section. 
+ */ +# ifndef CONFIG_LOCKDEP +# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq) +# define disable_irq_lockdep(irq) disable_irq(irq) +# define enable_irq_lockdep(irq) enable_irq(irq) +# endif + +#endif /* CONFIG_GENERIC_HARDIRQS */ #ifndef __ARCH_SET_SOFTIRQ_PENDING #define set_softirq_pending(x) (local_softirq_pending() = (x)) -- cgit v1.2.3 From d7e9629de051bb4b1d104588cd97673ad770809e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:24:27 -0700 Subject: [PATCH] lockdep: add local_irq_enable_in_hardirq() API Introduce local_irq_enable_in_hardirq() API. It is currently aliased to local_irq_enable(), hence has no functional effects. This API will be used by lockdep, but even without lockdep this will better document places in the kernel where a hardirq context enables hardirqs. Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/interrupt.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'include/linux') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 2c5452c1d7bb..73463fbb38e4 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -80,6 +80,23 @@ extern int request_irq(unsigned int, unsigned long, const char *, void *); extern void free_irq(unsigned int, void *); +/* + * On lockdep we dont want to enable hardirqs in hardirq + * context. Use local_irq_enable_in_hardirq() to annotate + * kernel code that has to do this nevertheless (pretty much + * the only valid case is for old/broken hardware that is + * insanely slow). + * + * NOTE: in theory this might break fragile code that relies + * on hardirq delivery - in practice we dont seem to have such + * places left. So the only effect should be slightly increased + * irqs-off latencies. + */ +#ifdef CONFIG_LOCKDEP +# define local_irq_enable_in_hardirq() do { } while (0) +#else +# define local_irq_enable_in_hardirq() local_irq_enable() +#endif #ifdef CONFIG_GENERIC_HARDIRQS extern void disable_irq_nosync(unsigned int irq); -- cgit v1.2.3 From 8b3db9c542e18b71d4820da4dd9401ee030feacb Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:24:28 -0700 Subject: [PATCH] lockdep: add DECLARE_COMPLETION_ONSTACK() API lockdep needs to have the waitqueue lock initialized for on-stack waitqueues implicitly initialized by DECLARE_COMPLETION(). Introduce the API. 
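A short usage sketch (editorial illustration, not taken from the patch; kick_off_work() is a hypothetical helper that eventually calls complete() on the passed completion):

	#include <linux/completion.h>

	extern void kick_off_work(struct completion *done);	/* hypothetical */

	static void wait_for_work(void)
	{
		/*
		 * On-stack completion: use the _ONSTACK() variant so lockdep
		 * sees a runtime init_completion() rather than the constant
		 * initializer used for static completions.
		 */
		DECLARE_COMPLETION_ONSTACK(done);

		kick_off_work(&done);
		wait_for_completion(&done);
	}
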
Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/completion.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include/linux') diff --git a/include/linux/completion.h b/include/linux/completion.h index 90663ad217f9..251c41e3ddd5 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -21,6 +21,18 @@ struct completion { #define DECLARE_COMPLETION(work) \ struct completion work = COMPLETION_INITIALIZER(work) +/* + * Lockdep needs to run a non-constant initializer for on-stack + * completions - so we use the _ONSTACK() variant for those that + * are on the kernel stack: + */ +#ifdef CONFIG_LOCKDEP +# define DECLARE_COMPLETION_ONSTACK(work) \ + struct completion work = ({ init_completion(&work); work; }) +#else +# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) +#endif + static inline void init_completion(struct completion *x) { x->done = 0; -- cgit v1.2.3 From c4e05116a2c4d8187127dbf77ab790aa57a47388 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:24:29 -0700 Subject: [PATCH] lockdep: clean up rwsems Clean up rwsems. Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-i386/rwsem.h | 17 +------ include/linux/rwsem-spinlock.h | 14 +----- include/linux/rwsem.h | 24 ---------- kernel/rwsem.c | 105 +++++++++++++++++++++++++++++++++++++++++ lib/rwsem-spinlock.c | 46 +----------------- lib/rwsem.c | 31 ------------ 6 files changed, 109 insertions(+), 128 deletions(-) create mode 100644 kernel/rwsem.c (limited to 'include/linux') diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h index be4ab859238e..558804e4a039 100644 --- a/include/asm-i386/rwsem.h +++ b/include/asm-i386/rwsem.h @@ -61,23 +61,11 @@ struct rw_semaphore { #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) spinlock_t wait_lock; struct list_head wait_list; -#if RWSEM_DEBUG - int debug; -#endif }; -/* - * initialisation - */ -#if RWSEM_DEBUG -#define __RWSEM_DEBUG_INIT , 0 -#else -#define __RWSEM_DEBUG_INIT /* */ -#endif - #define __RWSEM_INITIALIZER(name) \ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEBUG_INIT } + } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) @@ -87,9 +75,6 @@ static inline void init_rwsem(struct rw_semaphore *sem) sem->count = RWSEM_UNLOCKED_VALUE; spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->wait_list); -#if RWSEM_DEBUG - sem->debug = 0; -#endif } /* diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h index f30f805080ae..d68afcc36ac9 100644 --- a/include/linux/rwsem-spinlock.h +++ b/include/linux/rwsem-spinlock.h @@ -32,22 +32,10 @@ struct rw_semaphore { __s32 activity; spinlock_t wait_lock; struct list_head wait_list; -#if RWSEM_DEBUG - int debug; -#endif }; -/* - * initialisation - */ -#if RWSEM_DEBUG -#define __RWSEM_DEBUG_INIT , 0 -#else -#define __RWSEM_DEBUG_INIT /* */ -#endif - #define __RWSEM_INITIALIZER(name) \ -{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT } +{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index f99fe90732ab..93581534b915 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -9,8 +9,6 @@ #include -#define RWSEM_DEBUG 0 - #ifdef __KERNEL__ #include @@ -26,23 +24,13 @@ 
struct rw_semaphore; #include /* use an arch-specific implementation */ #endif -#ifndef rwsemtrace -#if RWSEM_DEBUG -extern void FASTCALL(rwsemtrace(struct rw_semaphore *sem, const char *str)); -#else -#define rwsemtrace(SEM,FMT) -#endif -#endif - /* * lock for reading */ static inline void down_read(struct rw_semaphore *sem) { might_sleep(); - rwsemtrace(sem,"Entering down_read"); __down_read(sem); - rwsemtrace(sem,"Leaving down_read"); } /* @@ -51,9 +39,7 @@ static inline void down_read(struct rw_semaphore *sem) static inline int down_read_trylock(struct rw_semaphore *sem) { int ret; - rwsemtrace(sem,"Entering down_read_trylock"); ret = __down_read_trylock(sem); - rwsemtrace(sem,"Leaving down_read_trylock"); return ret; } @@ -63,9 +49,7 @@ static inline int down_read_trylock(struct rw_semaphore *sem) static inline void down_write(struct rw_semaphore *sem) { might_sleep(); - rwsemtrace(sem,"Entering down_write"); __down_write(sem); - rwsemtrace(sem,"Leaving down_write"); } /* @@ -74,9 +58,7 @@ static inline void down_write(struct rw_semaphore *sem) static inline int down_write_trylock(struct rw_semaphore *sem) { int ret; - rwsemtrace(sem,"Entering down_write_trylock"); ret = __down_write_trylock(sem); - rwsemtrace(sem,"Leaving down_write_trylock"); return ret; } @@ -85,9 +67,7 @@ static inline int down_write_trylock(struct rw_semaphore *sem) */ static inline void up_read(struct rw_semaphore *sem) { - rwsemtrace(sem,"Entering up_read"); __up_read(sem); - rwsemtrace(sem,"Leaving up_read"); } /* @@ -95,9 +75,7 @@ static inline void up_read(struct rw_semaphore *sem) */ static inline void up_write(struct rw_semaphore *sem) { - rwsemtrace(sem,"Entering up_write"); __up_write(sem); - rwsemtrace(sem,"Leaving up_write"); } /* @@ -105,9 +83,7 @@ static inline void up_write(struct rw_semaphore *sem) */ static inline void downgrade_write(struct rw_semaphore *sem) { - rwsemtrace(sem,"Entering downgrade_write"); __downgrade_write(sem); - rwsemtrace(sem,"Leaving downgrade_write"); } #endif /* __KERNEL__ */ diff --git a/kernel/rwsem.c b/kernel/rwsem.c new file mode 100644 index 000000000000..790a99bb25aa --- /dev/null +++ b/kernel/rwsem.c @@ -0,0 +1,105 @@ +/* kernel/rwsem.c: R/W semaphores, public implementation + * + * Written by David Howells (dhowells@redhat.com). 
+ * Derived from asm-i386/semaphore.h + */ + +#include +#include +#include +#include + +#include +#include + +/* + * lock for reading + */ +void down_read(struct rw_semaphore *sem) +{ + might_sleep(); + rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); + + __down_read(sem); +} + +EXPORT_SYMBOL(down_read); + +/* + * trylock for reading -- returns 1 if successful, 0 if contention + */ +int down_read_trylock(struct rw_semaphore *sem) +{ + int ret = __down_read_trylock(sem); + + if (ret == 1) + rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_); + return ret; +} + +EXPORT_SYMBOL(down_read_trylock); + +/* + * lock for writing + */ +void down_write(struct rw_semaphore *sem) +{ + might_sleep(); + rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); + + __down_write(sem); +} + +EXPORT_SYMBOL(down_write); + +/* + * trylock for writing -- returns 1 if successful, 0 if contention + */ +int down_write_trylock(struct rw_semaphore *sem) +{ + int ret = __down_write_trylock(sem); + + if (ret == 1) + rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); + return ret; +} + +EXPORT_SYMBOL(down_write_trylock); + +/* + * release a read lock + */ +void up_read(struct rw_semaphore *sem) +{ + rwsem_release(&sem->dep_map, 1, _RET_IP_); + + __up_read(sem); +} + +EXPORT_SYMBOL(up_read); + +/* + * release a write lock + */ +void up_write(struct rw_semaphore *sem) +{ + rwsem_release(&sem->dep_map, 1, _RET_IP_); + + __up_write(sem); +} + +EXPORT_SYMBOL(up_write); + +/* + * downgrade write lock to read lock + */ +void downgrade_write(struct rw_semaphore *sem) +{ + /* + * lockdep: a downgraded write will live on as a write + * dependency. + */ + __downgrade_write(sem); +} + +EXPORT_SYMBOL(downgrade_write); diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c index 40ffde940a86..03b6097eb04e 100644 --- a/lib/rwsem-spinlock.c +++ b/lib/rwsem-spinlock.c @@ -17,16 +17,6 @@ struct rwsem_waiter { #define RWSEM_WAITING_FOR_WRITE 0x00000002 }; -#if RWSEM_DEBUG -void rwsemtrace(struct rw_semaphore *sem, const char *str) -{ - if (sem->debug) - printk("[%d] %s({%d,%d})\n", - current->pid, str, sem->activity, - list_empty(&sem->wait_list) ? 
0 : 1); -} -#endif - /* * initialise the semaphore */ @@ -35,9 +25,6 @@ void fastcall init_rwsem(struct rw_semaphore *sem) sem->activity = 0; spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->wait_list); -#if RWSEM_DEBUG - sem->debug = 0; -#endif } /* @@ -56,8 +43,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite) struct task_struct *tsk; int woken; - rwsemtrace(sem, "Entering __rwsem_do_wake"); - waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); if (!wakewrite) { @@ -104,7 +89,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite) sem->activity += woken; out: - rwsemtrace(sem, "Leaving __rwsem_do_wake"); return sem; } @@ -138,8 +122,6 @@ void fastcall __sched __down_read(struct rw_semaphore *sem) struct rwsem_waiter waiter; struct task_struct *tsk; - rwsemtrace(sem, "Entering __down_read"); - spin_lock_irq(&sem->wait_lock); if (sem->activity >= 0 && list_empty(&sem->wait_list)) { @@ -171,9 +153,8 @@ void fastcall __sched __down_read(struct rw_semaphore *sem) } tsk->state = TASK_RUNNING; - out: - rwsemtrace(sem, "Leaving __down_read"); + ; } /* @@ -184,7 +165,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem) unsigned long flags; int ret = 0; - rwsemtrace(sem, "Entering __down_read_trylock"); spin_lock_irqsave(&sem->wait_lock, flags); @@ -196,7 +176,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem) spin_unlock_irqrestore(&sem->wait_lock, flags); - rwsemtrace(sem, "Leaving __down_read_trylock"); return ret; } @@ -209,8 +188,6 @@ void fastcall __sched __down_write(struct rw_semaphore *sem) struct rwsem_waiter waiter; struct task_struct *tsk; - rwsemtrace(sem, "Entering __down_write"); - spin_lock_irq(&sem->wait_lock); if (sem->activity == 0 && list_empty(&sem->wait_list)) { @@ -242,9 +219,8 @@ void fastcall __sched __down_write(struct rw_semaphore *sem) } tsk->state = TASK_RUNNING; - out: - rwsemtrace(sem, "Leaving __down_write"); + ; } /* @@ -255,8 +231,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem) unsigned long flags; int ret = 0; - rwsemtrace(sem, "Entering __down_write_trylock"); - spin_lock_irqsave(&sem->wait_lock, flags); if (sem->activity == 0 && list_empty(&sem->wait_list)) { @@ -267,7 +241,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem) spin_unlock_irqrestore(&sem->wait_lock, flags); - rwsemtrace(sem, "Leaving __down_write_trylock"); return ret; } @@ -278,16 +251,12 @@ void fastcall __up_read(struct rw_semaphore *sem) { unsigned long flags; - rwsemtrace(sem, "Entering __up_read"); - spin_lock_irqsave(&sem->wait_lock, flags); if (--sem->activity == 0 && !list_empty(&sem->wait_list)) sem = __rwsem_wake_one_writer(sem); spin_unlock_irqrestore(&sem->wait_lock, flags); - - rwsemtrace(sem, "Leaving __up_read"); } /* @@ -297,8 +266,6 @@ void fastcall __up_write(struct rw_semaphore *sem) { unsigned long flags; - rwsemtrace(sem, "Entering __up_write"); - spin_lock_irqsave(&sem->wait_lock, flags); sem->activity = 0; @@ -306,8 +273,6 @@ void fastcall __up_write(struct rw_semaphore *sem) sem = __rwsem_do_wake(sem, 1); spin_unlock_irqrestore(&sem->wait_lock, flags); - - rwsemtrace(sem, "Leaving __up_write"); } /* @@ -318,8 +283,6 @@ void fastcall __downgrade_write(struct rw_semaphore *sem) { unsigned long flags; - rwsemtrace(sem, "Entering __downgrade_write"); - spin_lock_irqsave(&sem->wait_lock, flags); sem->activity = 1; @@ -327,8 +290,6 @@ void fastcall __downgrade_write(struct rw_semaphore *sem) sem = __rwsem_do_wake(sem, 0); spin_unlock_irqrestore(&sem->wait_lock, flags); - - 
rwsemtrace(sem, "Leaving __downgrade_write"); } EXPORT_SYMBOL(init_rwsem); @@ -339,6 +300,3 @@ EXPORT_SYMBOL(__down_write_trylock); EXPORT_SYMBOL(__up_read); EXPORT_SYMBOL(__up_write); EXPORT_SYMBOL(__downgrade_write); -#if RWSEM_DEBUG -EXPORT_SYMBOL(rwsemtrace); -#endif diff --git a/lib/rwsem.c b/lib/rwsem.c index 62fa4eba9ffe..bae597284889 100644 --- a/lib/rwsem.c +++ b/lib/rwsem.c @@ -16,17 +16,6 @@ struct rwsem_waiter { #define RWSEM_WAITING_FOR_WRITE 0x00000002 }; -#if RWSEM_DEBUG -#undef rwsemtrace -void rwsemtrace(struct rw_semaphore *sem, const char *str) -{ - printk("sem=%p\n", sem); - printk("(sem)=%08lx\n", sem->count); - if (sem->debug) - printk("[%d] %s({%08lx})\n", current->pid, str, sem->count); -} -#endif - /* * handle the lock release when processes blocked on it that can now run * - if we come here from up_xxxx(), then: @@ -45,8 +34,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading) struct list_head *next; signed long oldcount, woken, loop; - rwsemtrace(sem, "Entering __rwsem_do_wake"); - if (downgrading) goto dont_wake_writers; @@ -127,7 +114,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading) next->prev = &sem->wait_list; out: - rwsemtrace(sem, "Leaving __rwsem_do_wake"); return sem; /* undo the change to count, but check for a transition 1->0 */ @@ -186,13 +172,9 @@ rwsem_down_read_failed(struct rw_semaphore *sem) { struct rwsem_waiter waiter; - rwsemtrace(sem, "Entering rwsem_down_read_failed"); - waiter.flags = RWSEM_WAITING_FOR_READ; rwsem_down_failed_common(sem, &waiter, RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS); - - rwsemtrace(sem, "Leaving rwsem_down_read_failed"); return sem; } @@ -204,12 +186,9 @@ rwsem_down_write_failed(struct rw_semaphore *sem) { struct rwsem_waiter waiter; - rwsemtrace(sem, "Entering rwsem_down_write_failed"); - waiter.flags = RWSEM_WAITING_FOR_WRITE; rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS); - rwsemtrace(sem, "Leaving rwsem_down_write_failed"); return sem; } @@ -221,8 +200,6 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem) { unsigned long flags; - rwsemtrace(sem, "Entering rwsem_wake"); - spin_lock_irqsave(&sem->wait_lock, flags); /* do nothing if list empty */ @@ -231,8 +208,6 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem) spin_unlock_irqrestore(&sem->wait_lock, flags); - rwsemtrace(sem, "Leaving rwsem_wake"); - return sem; } @@ -245,8 +220,6 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem) { unsigned long flags; - rwsemtrace(sem, "Entering rwsem_downgrade_wake"); - spin_lock_irqsave(&sem->wait_lock, flags); /* do nothing if list empty */ @@ -255,7 +228,6 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem) spin_unlock_irqrestore(&sem->wait_lock, flags); - rwsemtrace(sem, "Leaving rwsem_downgrade_wake"); return sem; } @@ -263,6 +235,3 @@ EXPORT_SYMBOL(rwsem_down_read_failed); EXPORT_SYMBOL(rwsem_down_write_failed); EXPORT_SYMBOL(rwsem_wake); EXPORT_SYMBOL(rwsem_downgrade_wake); -#if RWSEM_DEBUG -EXPORT_SYMBOL(rwsemtrace); -#endif -- cgit v1.2.3 From 9a11b49a805665e13a56aa067afaf81d43ec1514 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:24:33 -0700 Subject: [PATCH] lockdep: better lock debugging Generic lock debugging: - generalized lock debugging framework. For example, a bug in one lock subsystem turns off debugging in all lock subsystems. - got rid of the caller address passing (__IP__/__IP_DECL__/etc.) 
from the mutex/rtmutex debugging code: it caused way too much prototype hackery, and lockdep will give the same information anyway. - ability to do silent tests - check lock freeing in vfree too. - more finegrained debugging options, to allow distributions to turn off more expensive debugging features. There's no separate 'held mutexes' list anymore - but there's a 'held locks' stack within lockdep, which unifies deadlock detection across all lock classes. (this is independent of the lockdep validation stuff - lockdep first checks whether we are holding a lock already) Here are the current debugging options: CONFIG_DEBUG_MUTEXES=y CONFIG_DEBUG_LOCK_ALLOC=y which do: config DEBUG_MUTEXES bool "Mutex debugging, basic checks" config DEBUG_LOCK_ALLOC bool "Detect incorrect freeing of live mutexes" Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/sysrq.c | 2 +- include/asm-generic/mutex-null.h | 15 +- include/linux/debug_locks.h | 69 +++++++++ include/linux/init_task.h | 1 - include/linux/mm.h | 8 +- include/linux/mutex-debug.h | 12 +- include/linux/mutex.h | 6 - include/linux/rtmutex.h | 10 -- include/linux/sched.h | 4 - init/main.c | 8 ++ kernel/exit.c | 5 +- kernel/fork.c | 4 - kernel/mutex-debug.c | 51 +------ kernel/mutex-debug.h | 82 ++--------- kernel/mutex.c | 52 +++---- kernel/mutex.h | 17 +-- kernel/rtmutex-debug.c | 302 ++------------------------------------- kernel/rtmutex-debug.h | 8 +- kernel/rtmutex.c | 46 +++--- kernel/rtmutex.h | 3 - kernel/sched.c | 16 ++- lib/Makefile | 2 +- lib/debug_locks.c | 45 ++++++ lib/spinlock_debug.c | 62 ++++---- mm/vmalloc.c | 2 + 25 files changed, 265 insertions(+), 567 deletions(-) create mode 100644 include/linux/debug_locks.h create mode 100644 lib/debug_locks.c (limited to 'include/linux') diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index a064ee9181c0..e31f079400fa 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c @@ -151,7 +151,7 @@ static struct sysrq_key_op sysrq_mountro_op = { static void sysrq_handle_showlocks(int key, struct pt_regs *pt_regs, struct tty_struct *tty) { - mutex_debug_show_all_locks(); + debug_show_all_locks(); } static struct sysrq_key_op sysrq_showlocks_op = { .handler = sysrq_handle_showlocks, diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h index 5cf8b7ce0c45..254a126ede5c 100644 --- a/include/asm-generic/mutex-null.h +++ b/include/asm-generic/mutex-null.h @@ -10,15 +10,10 @@ #ifndef _ASM_GENERIC_MUTEX_NULL_H #define _ASM_GENERIC_MUTEX_NULL_H -/* extra parameter only needed for mutex debugging: */ -#ifndef __IP__ -# define __IP__ -#endif - -#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count __RET_IP__) -#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count __RET_IP__) -#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count __RET_IP__) -#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count) -#define __mutex_slowpath_needs_to_unlock() 1 +#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count) +#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count) +#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count) +#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count) +#define __mutex_slowpath_needs_to_unlock() 1 #endif diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h new file mode 100644 index 000000000000..6a7047851e48 --- /dev/null +++ b/include/linux/debug_locks.h @@ -0,0 +1,69 @@ +#ifndef 
__LINUX_DEBUG_LOCKING_H +#define __LINUX_DEBUG_LOCKING_H + +extern int debug_locks; +extern int debug_locks_silent; + +/* + * Generic 'turn off all lock debugging' function: + */ +extern int debug_locks_off(void); + +/* + * In the debug case we carry the caller's instruction pointer into + * other functions, but we dont want the function argument overhead + * in the nondebug case - hence these macros: + */ +#define _RET_IP_ (unsigned long)__builtin_return_address(0) +#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) + +#define DEBUG_LOCKS_WARN_ON(c) \ +({ \ + int __ret = 0; \ + \ + if (unlikely(c)) { \ + if (debug_locks_off()) \ + WARN_ON(1); \ + __ret = 1; \ + } \ + __ret; \ +}) + +#ifdef CONFIG_SMP +# define SMP_DEBUG_LOCKS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c) +#else +# define SMP_DEBUG_LOCKS_WARN_ON(c) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS + extern void locking_selftest(void); +#else +# define locking_selftest() do { } while (0) +#endif + +#ifdef CONFIG_LOCKDEP +extern void debug_show_all_locks(void); +extern void debug_show_held_locks(struct task_struct *task); +extern void debug_check_no_locks_freed(const void *from, unsigned long len); +extern void debug_check_no_locks_held(struct task_struct *task); +#else +static inline void debug_show_all_locks(void) +{ +} + +static inline void debug_show_held_locks(struct task_struct *task) +{ +} + +static inline void +debug_check_no_locks_freed(const void *from, unsigned long len) +{ +} + +static inline void +debug_check_no_locks_held(struct task_struct *task) +{ +} +#endif + +#endif diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 3a256957fb56..678c1a90380d 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -124,7 +124,6 @@ extern struct group_info init_groups; .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ .pi_lock = SPIN_LOCK_UNLOCKED, \ - INIT_RT_MUTEXES(tsk) \ } diff --git a/include/linux/mm.h b/include/linux/mm.h index 75179529e399..990957e0929f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -14,6 +14,7 @@ #include #include #include +#include struct mempolicy; struct anon_vma; @@ -1034,13 +1035,6 @@ static inline void vm_stat_account(struct mm_struct *mm, } #endif /* CONFIG_PROC_FS */ -static inline void -debug_check_no_locks_freed(const void *from, unsigned long len) -{ - mutex_debug_check_no_locks_freed(from, len); - rt_mutex_debug_check_no_locks_freed(from, len); -} - #ifndef CONFIG_DEBUG_PAGEALLOC static inline void kernel_map_pages(struct page *page, int numpages, int enable) diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h index 8b5769f00467..70a26091fc73 100644 --- a/include/linux/mutex-debug.h +++ b/include/linux/mutex-debug.h @@ -7,17 +7,11 @@ * Mutexes - debugging helpers: */ -#define __DEBUG_MUTEX_INITIALIZER(lockname) \ - , .held_list = LIST_HEAD_INIT(lockname.held_list), \ - .name = #lockname , .magic = &lockname +#define __DEBUG_MUTEX_INITIALIZER(lockname) \ + , .magic = &lockname -#define mutex_init(sem) __mutex_init(sem, __FUNCTION__) +#define mutex_init(sem) __mutex_init(sem, __FILE__":"#sem) extern void FASTCALL(mutex_destroy(struct mutex *lock)); -extern void mutex_debug_show_all_locks(void); -extern void mutex_debug_show_held_locks(struct task_struct *filter); -extern void mutex_debug_check_no_locks_held(struct task_struct *task); -extern void mutex_debug_check_no_locks_freed(const void *from, unsigned long len); - #endif diff --git a/include/linux/mutex.h 
b/include/linux/mutex.h index f1ac507fa20d..caafecd5e366 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -50,8 +50,6 @@ struct mutex { struct list_head wait_list; #ifdef CONFIG_DEBUG_MUTEXES struct thread_info *owner; - struct list_head held_list; - unsigned long acquire_ip; const char *name; void *magic; #endif @@ -76,10 +74,6 @@ struct mutex_waiter { # define __DEBUG_MUTEX_INITIALIZER(lockname) # define mutex_init(mutex) __mutex_init(mutex, NULL) # define mutex_destroy(mutex) do { } while (0) -# define mutex_debug_show_all_locks() do { } while (0) -# define mutex_debug_show_held_locks(p) do { } while (0) -# define mutex_debug_check_no_locks_held(task) do { } while (0) -# define mutex_debug_check_no_locks_freed(from, len) do { } while (0) #endif #define __MUTEX_INITIALIZER(lockname) \ diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index fa4a3b82ba70..5d41dee82f80 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -29,8 +29,6 @@ struct rt_mutex { struct task_struct *owner; #ifdef CONFIG_DEBUG_RT_MUTEXES int save_state; - struct list_head held_list_entry; - unsigned long acquire_ip; const char *name, *file; int line; void *magic; @@ -98,14 +96,6 @@ extern int rt_mutex_trylock(struct rt_mutex *lock); extern void rt_mutex_unlock(struct rt_mutex *lock); -#ifdef CONFIG_DEBUG_RT_MUTEXES -# define INIT_RT_MUTEX_DEBUG(tsk) \ - .held_list_head = LIST_HEAD_INIT(tsk.held_list_head), \ - .held_list_lock = SPIN_LOCK_UNLOCKED -#else -# define INIT_RT_MUTEX_DEBUG(tsk) -#endif - #ifdef CONFIG_RT_MUTEXES # define INIT_RT_MUTEXES(tsk) \ .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \ diff --git a/include/linux/sched.h b/include/linux/sched.h index aaf723308ed4..bdabeee10a78 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -865,10 +865,6 @@ struct task_struct { struct plist_head pi_waiters; /* Deadlock detection and priority inheritance handling */ struct rt_mutex_waiter *pi_blocked_on; -# ifdef CONFIG_DEBUG_RT_MUTEXES - spinlock_t held_list_lock; - struct list_head held_list_head; -# endif #endif #ifdef CONFIG_DEBUG_MUTEXES diff --git a/init/main.c b/init/main.c index d604dfef82b8..fc73e1cd8614 100644 --- a/init/main.c +++ b/init/main.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include @@ -511,6 +512,13 @@ asmlinkage void __init start_kernel(void) console_init(); if (panic_later) panic(panic_later, panic_param); + /* + * Need to run this when irqs are enabled, because it wants + * to self-test [hard/soft]-irqs on/off lock inversion bugs + * too: + */ + locking_selftest(); + #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start && !initrd_below_start_ok && initrd_start < min_low_pfn << PAGE_SHIFT) { diff --git a/kernel/exit.c b/kernel/exit.c index 7f7ef2258553..c595db14cf25 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -933,10 +933,9 @@ fastcall NORET_TYPE void do_exit(long code) if (unlikely(current->pi_state_cache)) kfree(current->pi_state_cache); /* - * If DEBUG_MUTEXES is on, make sure we are holding no locks: + * Make sure we are holding no locks: */ - mutex_debug_check_no_locks_held(tsk); - rt_mutex_debug_check_no_locks_held(tsk); + debug_check_no_locks_held(tsk); if (tsk->io_context) exit_io_context(); diff --git a/kernel/fork.c b/kernel/fork.c index 9064bf9e131b..1cd46a4fb0d3 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -919,10 +919,6 @@ static inline void rt_mutex_init_task(struct task_struct *p) spin_lock_init(&p->pi_lock); plist_head_init(&p->pi_waiters, &p->pi_lock); p->pi_blocked_on = 
NULL; -# ifdef CONFIG_DEBUG_RT_MUTEXES - spin_lock_init(&p->held_list_lock); - INIT_LIST_HEAD(&p->held_list_head); -# endif #endif } diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c index a92de145ed0d..5569766a1ea2 100644 --- a/kernel/mutex-debug.c +++ b/kernel/mutex-debug.c @@ -20,52 +20,19 @@ #include #include #include +#include #include "mutex-debug.h" -/* - * We need a global lock when we walk through the multi-process - * lock tree. Only used in the deadlock-debugging case. - */ -DEFINE_SPINLOCK(debug_mutex_lock); - -/* - * All locks held by all tasks, in a single global list: - */ -LIST_HEAD(debug_mutex_held_locks); - -/* - * In the debug case we carry the caller's instruction pointer into - * other functions, but we dont want the function argument overhead - * in the nondebug case - hence these macros: - */ -#define __IP_DECL__ , unsigned long ip -#define __IP__ , ip -#define __RET_IP__ , (unsigned long)__builtin_return_address(0) - -/* - * "mutex debugging enabled" flag. We turn it off when we detect - * the first problem because we dont want to recurse back - * into the tracing code when doing error printk or - * executing a BUG(): - */ -int debug_mutex_on = 1; - /* * Must be called with lock->wait_lock held. */ -void debug_mutex_set_owner(struct mutex *lock, - struct thread_info *new_owner __IP_DECL__) +void debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner) { lock->owner = new_owner; - DEBUG_LOCKS_WARN_ON(!list_empty(&lock->held_list)); - if (debug_mutex_on) { - list_add_tail(&lock->held_list, &debug_mutex_held_locks); - lock->acquire_ip = ip; - } } -void debug_mutex_init_waiter(struct mutex_waiter *waiter) +void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter) { memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter)); waiter->magic = waiter; @@ -87,9 +54,10 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter) } void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, - struct thread_info *ti __IP_DECL__) + struct thread_info *ti) { SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)); + /* Mark the current thread as blocked on the lock: */ ti->task->blocked_on = waiter; waiter->lock = lock; @@ -109,13 +77,10 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, void debug_mutex_unlock(struct mutex *lock) { + DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info()); DEBUG_LOCKS_WARN_ON(lock->magic != lock); DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info()); - if (debug_mutex_on) { - DEBUG_LOCKS_WARN_ON(list_empty(&lock->held_list)); - list_del_init(&lock->held_list); - } } void debug_mutex_init(struct mutex *lock, const char *name) @@ -123,10 +88,8 @@ void debug_mutex_init(struct mutex *lock, const char *name) /* * Make sure we are not reinitializing a held lock: */ - mutex_debug_check_no_locks_freed((void *)lock, sizeof(*lock)); + debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lock->owner = NULL; - INIT_LIST_HEAD(&lock->held_list); - lock->name = name; lock->magic = lock; } diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h index bdab13a9ee26..babfbdfc534b 100644 --- a/kernel/mutex-debug.h +++ b/kernel/mutex-debug.h @@ -10,102 +10,44 @@ * More details are in kernel/mutex-debug.c. 
*/ -extern spinlock_t debug_mutex_lock; -extern struct list_head debug_mutex_held_locks; -extern int debug_mutex_on; - -/* - * In the debug case we carry the caller's instruction pointer into - * other functions, but we dont want the function argument overhead - * in the nondebug case - hence these macros: - */ -#define __IP_DECL__ , unsigned long ip -#define __IP__ , ip -#define __RET_IP__ , (unsigned long)__builtin_return_address(0) - /* * This must be called with lock->wait_lock held. */ -extern void debug_mutex_set_owner(struct mutex *lock, - struct thread_info *new_owner __IP_DECL__); +extern void +debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner); static inline void debug_mutex_clear_owner(struct mutex *lock) { lock->owner = NULL; } -extern void debug_mutex_init_waiter(struct mutex_waiter *waiter); +extern void debug_mutex_lock_common(struct mutex *lock, + struct mutex_waiter *waiter); extern void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter); extern void debug_mutex_free_waiter(struct mutex_waiter *waiter); extern void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, - struct thread_info *ti __IP_DECL__); + struct thread_info *ti); extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, struct thread_info *ti); extern void debug_mutex_unlock(struct mutex *lock); -extern void debug_mutex_init(struct mutex *lock, const char *name); - -#define debug_spin_lock_save(lock, flags) \ - do { \ - local_irq_save(flags); \ - if (debug_mutex_on) \ - spin_lock(lock); \ - } while (0) - -#define debug_spin_unlock_restore(lock, flags) \ - do { \ - if (debug_mutex_on) \ - spin_unlock(lock); \ - local_irq_restore(flags); \ - preempt_check_resched(); \ - } while (0) +extern void debug_mutex_init(struct mutex *lock, const char *name, + struct lock_class_key *key); #define spin_lock_mutex(lock, flags) \ do { \ struct mutex *l = container_of(lock, struct mutex, wait_lock); \ \ DEBUG_LOCKS_WARN_ON(in_interrupt()); \ - debug_spin_lock_save(&debug_mutex_lock, flags); \ - spin_lock(lock); \ + local_irq_save(flags); \ + __raw_spin_lock(&(lock)->raw_lock); \ DEBUG_LOCKS_WARN_ON(l->magic != l); \ } while (0) #define spin_unlock_mutex(lock, flags) \ do { \ - spin_unlock(lock); \ - debug_spin_unlock_restore(&debug_mutex_lock, flags); \ + __raw_spin_unlock(&(lock)->raw_lock); \ + local_irq_restore(flags); \ + preempt_check_resched(); \ } while (0) - -#define DEBUG_OFF() \ -do { \ - if (debug_mutex_on) { \ - debug_mutex_on = 0; \ - console_verbose(); \ - if (spin_is_locked(&debug_mutex_lock)) \ - spin_unlock(&debug_mutex_lock); \ - } \ -} while (0) - -#define DEBUG_BUG() \ -do { \ - if (debug_mutex_on) { \ - DEBUG_OFF(); \ - BUG(); \ - } \ -} while (0) - -#define DEBUG_LOCKS_WARN_ON(c) \ -do { \ - if (unlikely(c && debug_mutex_on)) { \ - DEBUG_OFF(); \ - WARN_ON(1); \ - } \ -} while (0) - -#ifdef CONFIG_SMP -# define SMP_DEBUG_LOCKS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c) -#else -# define SMP_DEBUG_LOCKS_WARN_ON(c) do { } while (0) -#endif - diff --git a/kernel/mutex.c b/kernel/mutex.c index 101ceeb38925..3aad0b7992f4 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -17,6 +17,7 @@ #include #include #include +#include /* * In the DEBUG case we are using the "NULL fastpath" for mutexes, @@ -38,7 +39,7 @@ * * It is not allowed to initialize an already locked mutex. 
*/ -void fastcall __mutex_init(struct mutex *lock, const char *name) +__always_inline void fastcall __mutex_init(struct mutex *lock, const char *name) { atomic_set(&lock->count, 1); spin_lock_init(&lock->wait_lock); @@ -56,7 +57,7 @@ EXPORT_SYMBOL(__mutex_init); * branch is predicted by the CPU as default-untaken. */ static void fastcall noinline __sched -__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__); +__mutex_lock_slowpath(atomic_t *lock_count); /*** * mutex_lock - acquire the mutex @@ -79,7 +80,7 @@ __mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__); * * This function is similar to (but not equivalent to) down(). */ -void fastcall __sched mutex_lock(struct mutex *lock) +void inline fastcall __sched mutex_lock(struct mutex *lock) { might_sleep(); /* @@ -92,7 +93,7 @@ void fastcall __sched mutex_lock(struct mutex *lock) EXPORT_SYMBOL(mutex_lock); static void fastcall noinline __sched -__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__); +__mutex_unlock_slowpath(atomic_t *lock_count); /*** * mutex_unlock - release the mutex @@ -120,18 +121,17 @@ EXPORT_SYMBOL(mutex_unlock); * Lock a mutex (possibly interruptible), slowpath: */ static inline int __sched -__mutex_lock_common(struct mutex *lock, long state __IP_DECL__) +__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass) { struct task_struct *task = current; struct mutex_waiter waiter; unsigned int old_val; unsigned long flags; - debug_mutex_init_waiter(&waiter); - spin_lock_mutex(&lock->wait_lock, flags); - debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip); + debug_mutex_lock_common(lock, &waiter); + debug_mutex_add_waiter(lock, &waiter, task->thread_info); /* add waiting tasks to the end of the waitqueue (FIFO): */ list_add_tail(&waiter.list, &lock->wait_list); @@ -173,7 +173,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__) /* got the lock - rejoice! 
*/ mutex_remove_waiter(lock, &waiter, task->thread_info); - debug_mutex_set_owner(lock, task->thread_info __IP__); + debug_mutex_set_owner(lock, task->thread_info); /* set it to 0 if there are no waiters left: */ if (likely(list_empty(&lock->wait_list))) @@ -183,32 +183,28 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__) debug_mutex_free_waiter(&waiter); - DEBUG_LOCKS_WARN_ON(list_empty(&lock->held_list)); - DEBUG_LOCKS_WARN_ON(lock->owner != task->thread_info); - return 0; } static void fastcall noinline __sched -__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__) +__mutex_lock_slowpath(atomic_t *lock_count) { struct mutex *lock = container_of(lock_count, struct mutex, count); - __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__); + __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0); } /* * Release the lock, slowpath: */ -static fastcall noinline void -__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__) +static fastcall inline void +__mutex_unlock_common_slowpath(atomic_t *lock_count) { struct mutex *lock = container_of(lock_count, struct mutex, count); unsigned long flags; - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info()); - spin_lock_mutex(&lock->wait_lock, flags); + debug_mutex_unlock(lock); /* * some architectures leave the lock unlocked in the fastpath failure @@ -218,8 +214,6 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__) if (__mutex_slowpath_needs_to_unlock()) atomic_set(&lock->count, 1); - debug_mutex_unlock(lock); - if (!list_empty(&lock->wait_list)) { /* get the first entry from the wait-list: */ struct mutex_waiter *waiter = @@ -236,12 +230,21 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__) spin_unlock_mutex(&lock->wait_lock, flags); } +/* + * Release the lock, slowpath: + */ +static fastcall noinline void +__mutex_unlock_slowpath(atomic_t *lock_count) +{ + __mutex_unlock_common_slowpath(lock_count); +} + /* * Here come the less common (and hence less performance-critical) APIs: * mutex_lock_interruptible() and mutex_trylock(). 
*/ static int fastcall noinline __sched -__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__); +__mutex_lock_interruptible_slowpath(atomic_t *lock_count); /*** * mutex_lock_interruptible - acquire the mutex, interruptable @@ -264,11 +267,11 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock) EXPORT_SYMBOL(mutex_lock_interruptible); static int fastcall noinline __sched -__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__) +__mutex_lock_interruptible_slowpath(atomic_t *lock_count) { struct mutex *lock = container_of(lock_count, struct mutex, count); - return __mutex_lock_common(lock, TASK_INTERRUPTIBLE __IP__); + return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0); } /* @@ -285,7 +288,8 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count) prev = atomic_xchg(&lock->count, -1); if (likely(prev == 1)) - debug_mutex_set_owner(lock, current_thread_info() __RET_IP__); + debug_mutex_set_owner(lock, current_thread_info()); + /* Set it back to 0 if there are no waiters: */ if (likely(list_empty(&lock->wait_list))) atomic_set(&lock->count, 0); diff --git a/kernel/mutex.h b/kernel/mutex.h index 7e1ed48d1a6c..aeb2d916aa0e 100644 --- a/kernel/mutex.h +++ b/kernel/mutex.h @@ -16,22 +16,15 @@ #define mutex_remove_waiter(lock, waiter, ti) \ __list_del((waiter)->list.prev, (waiter)->list.next) -#define DEBUG_LOCKS_WARN_ON(c) do { } while (0) #define debug_mutex_set_owner(lock, new_owner) do { } while (0) #define debug_mutex_clear_owner(lock) do { } while (0) -#define debug_mutex_init_waiter(waiter) do { } while (0) #define debug_mutex_wake_waiter(lock, waiter) do { } while (0) #define debug_mutex_free_waiter(waiter) do { } while (0) -#define debug_mutex_add_waiter(lock, waiter, ti, ip) do { } while (0) +#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0) #define debug_mutex_unlock(lock) do { } while (0) #define debug_mutex_init(lock, name) do { } while (0) -/* - * Return-address parameters/declarations. They are very useful for - * debugging, but add overhead in the !DEBUG case - so we go the - * trouble of using this not too elegant but zero-cost solution: - */ -#define __IP_DECL__ -#define __IP__ -#define __RET_IP__ - +static inline void +debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter) +{ +} diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c index 4aa8a2c9f453..353a853bc390 100644 --- a/kernel/rtmutex-debug.c +++ b/kernel/rtmutex-debug.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "rtmutex_common.h" @@ -45,8 +46,6 @@ do { \ console_verbose(); \ if (spin_is_locked(¤t->pi_lock)) \ spin_unlock(¤t->pi_lock); \ - if (spin_is_locked(¤t->held_list_lock)) \ - spin_unlock(¤t->held_list_lock); \ } \ } while (0) @@ -105,14 +104,6 @@ static void printk_task(task_t *p) printk(""); } -static void printk_task_short(task_t *p) -{ - if (p) - printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio); - else - printk(""); -} - static void printk_lock(struct rt_mutex *lock, int print_owner) { if (lock->name) @@ -128,222 +119,6 @@ static void printk_lock(struct rt_mutex *lock, int print_owner) printk_task(rt_mutex_owner(lock)); printk("\n"); } - if (rt_mutex_owner(lock)) { - printk("... 
acquired at: "); - print_symbol("%s\n", lock->acquire_ip); - } -} - -static void printk_waiter(struct rt_mutex_waiter *w) -{ - printk("-------------------------\n"); - printk("| waiter struct %p:\n", w); - printk("| w->list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n", - w->list_entry.plist.prio_list.prev, w->list_entry.plist.prio_list.next, - w->list_entry.plist.node_list.prev, w->list_entry.plist.node_list.next, - w->list_entry.prio); - printk("| w->pi_list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n", - w->pi_list_entry.plist.prio_list.prev, w->pi_list_entry.plist.prio_list.next, - w->pi_list_entry.plist.node_list.prev, w->pi_list_entry.plist.node_list.next, - w->pi_list_entry.prio); - printk("\n| lock:\n"); - printk_lock(w->lock, 1); - printk("| w->ti->task:\n"); - printk_task(w->task); - printk("| blocked at: "); - print_symbol("%s\n", w->ip); - printk("-------------------------\n"); -} - -static void show_task_locks(task_t *p) -{ - switch (p->state) { - case TASK_RUNNING: printk("R"); break; - case TASK_INTERRUPTIBLE: printk("S"); break; - case TASK_UNINTERRUPTIBLE: printk("D"); break; - case TASK_STOPPED: printk("T"); break; - case EXIT_ZOMBIE: printk("Z"); break; - case EXIT_DEAD: printk("X"); break; - default: printk("?"); break; - } - printk_task(p); - if (p->pi_blocked_on) { - struct rt_mutex *lock = p->pi_blocked_on->lock; - - printk(" blocked on:"); - printk_lock(lock, 1); - } else - printk(" (not blocked)\n"); -} - -void rt_mutex_show_held_locks(task_t *task, int verbose) -{ - struct list_head *curr, *cursor = NULL; - struct rt_mutex *lock; - task_t *t; - unsigned long flags; - int count = 0; - - if (!rt_trace_on) - return; - - if (verbose) { - printk("------------------------------\n"); - printk("| showing all locks held by: | ("); - printk_task_short(task); - printk("):\n"); - printk("------------------------------\n"); - } - -next: - spin_lock_irqsave(&task->held_list_lock, flags); - list_for_each(curr, &task->held_list_head) { - if (cursor && curr != cursor) - continue; - lock = list_entry(curr, struct rt_mutex, held_list_entry); - t = rt_mutex_owner(lock); - WARN_ON(t != task); - count++; - cursor = curr->next; - spin_unlock_irqrestore(&task->held_list_lock, flags); - - printk("\n#%03d: ", count); - printk_lock(lock, 0); - goto next; - } - spin_unlock_irqrestore(&task->held_list_lock, flags); - - printk("\n"); -} - -void rt_mutex_show_all_locks(void) -{ - task_t *g, *p; - int count = 10; - int unlock = 1; - - printk("\n"); - printk("----------------------\n"); - printk("| showing all tasks: |\n"); - printk("----------------------\n"); - - /* - * Here we try to get the tasklist_lock as hard as possible, - * if not successful after 2 seconds we ignore it (but keep - * trying). This is to enable a debug printout even if a - * tasklist_lock-holding task deadlocks or crashes. - */ -retry: - if (!read_trylock(&tasklist_lock)) { - if (count == 10) - printk("hm, tasklist_lock locked, retrying... 
"); - if (count) { - count--; - printk(" #%d", 10-count); - mdelay(200); - goto retry; - } - printk(" ignoring it.\n"); - unlock = 0; - } - if (count != 10) - printk(" locked it.\n"); - - do_each_thread(g, p) { - show_task_locks(p); - if (!unlock) - if (read_trylock(&tasklist_lock)) - unlock = 1; - } while_each_thread(g, p); - - printk("\n"); - - printk("-----------------------------------------\n"); - printk("| showing all locks held in the system: |\n"); - printk("-----------------------------------------\n"); - - do_each_thread(g, p) { - rt_mutex_show_held_locks(p, 0); - if (!unlock) - if (read_trylock(&tasklist_lock)) - unlock = 1; - } while_each_thread(g, p); - - - printk("=============================================\n\n"); - - if (unlock) - read_unlock(&tasklist_lock); -} - -void rt_mutex_debug_check_no_locks_held(task_t *task) -{ - struct rt_mutex_waiter *w; - struct list_head *curr; - struct rt_mutex *lock; - - if (!rt_trace_on) - return; - if (!rt_prio(task->normal_prio) && rt_prio(task->prio)) { - printk("BUG: PI priority boost leaked!\n"); - printk_task(task); - printk("\n"); - } - if (list_empty(&task->held_list_head)) - return; - - spin_lock(&task->pi_lock); - plist_for_each_entry(w, &task->pi_waiters, pi_list_entry) { - TRACE_OFF(); - - printk("hm, PI interest held at exit time? Task:\n"); - printk_task(task); - printk_waiter(w); - return; - } - spin_unlock(&task->pi_lock); - - list_for_each(curr, &task->held_list_head) { - lock = list_entry(curr, struct rt_mutex, held_list_entry); - - printk("BUG: %s/%d, lock held at task exit time!\n", - task->comm, task->pid); - printk_lock(lock, 1); - if (rt_mutex_owner(lock) != task) - printk("exiting task is not even the owner??\n"); - } -} - -int rt_mutex_debug_check_no_locks_freed(const void *from, unsigned long len) -{ - const void *to = from + len; - struct list_head *curr; - struct rt_mutex *lock; - unsigned long flags; - void *lock_addr; - - if (!rt_trace_on) - return 0; - - spin_lock_irqsave(¤t->held_list_lock, flags); - list_for_each(curr, ¤t->held_list_head) { - lock = list_entry(curr, struct rt_mutex, held_list_entry); - lock_addr = lock; - if (lock_addr < from || lock_addr >= to) - continue; - TRACE_OFF(); - - printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n", - current->comm, current->pid, lock, from, to); - dump_stack(); - printk_lock(lock, 1); - if (rt_mutex_owner(lock) != current) - printk("freeing task is not even the owner??\n"); - return 1; - } - spin_unlock_irqrestore(¤t->held_list_lock, flags); - - return 0; } void rt_mutex_debug_task_free(struct task_struct *task) @@ -395,85 +170,41 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter) current->comm, current->pid); printk_lock(waiter->lock, 1); - printk("... trying at: "); - print_symbol("%s\n", waiter->ip); - printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid); printk_lock(waiter->deadlock_lock, 1); - rt_mutex_show_held_locks(current, 1); - rt_mutex_show_held_locks(task, 1); + debug_show_held_locks(current); + debug_show_held_locks(task); printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid); show_stack(task, NULL); printk("\n%s/%d's [current] stackdump:\n\n", current->comm, current->pid); dump_stack(); - rt_mutex_show_all_locks(); + debug_show_all_locks(); + printk("[ turning off deadlock detection." "Please report this trace. 
]\n\n"); local_irq_disable(); } -void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__) +void debug_rt_mutex_lock(struct rt_mutex *lock) { - unsigned long flags; - - if (rt_trace_on) { - TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry)); - - spin_lock_irqsave(¤t->held_list_lock, flags); - list_add_tail(&lock->held_list_entry, ¤t->held_list_head); - spin_unlock_irqrestore(¤t->held_list_lock, flags); - - lock->acquire_ip = ip; - } } void debug_rt_mutex_unlock(struct rt_mutex *lock) { - unsigned long flags; - - if (rt_trace_on) { - TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current); - TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry)); - - spin_lock_irqsave(¤t->held_list_lock, flags); - list_del_init(&lock->held_list_entry); - spin_unlock_irqrestore(¤t->held_list_lock, flags); - } + TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current); } -void debug_rt_mutex_proxy_lock(struct rt_mutex *lock, - struct task_struct *powner __IP_DECL__) +void +debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner) { - unsigned long flags; - - if (rt_trace_on) { - TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry)); - - spin_lock_irqsave(&powner->held_list_lock, flags); - list_add_tail(&lock->held_list_entry, &powner->held_list_head); - spin_unlock_irqrestore(&powner->held_list_lock, flags); - - lock->acquire_ip = ip; - } } void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock) { - unsigned long flags; - - if (rt_trace_on) { - struct task_struct *owner = rt_mutex_owner(lock); - - TRACE_WARN_ON_LOCKED(!owner); - TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry)); - - spin_lock_irqsave(&owner->held_list_lock, flags); - list_del_init(&lock->held_list_entry); - spin_unlock_irqrestore(&owner->held_list_lock, flags); - } + TRACE_WARN_ON_LOCKED(!rt_mutex_owner(lock)); } void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) @@ -493,14 +224,11 @@ void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) void debug_rt_mutex_init(struct rt_mutex *lock, const char *name) { - void *addr = lock; - - if (rt_trace_on) { - rt_mutex_debug_check_no_locks_freed(addr, - sizeof(struct rt_mutex)); - INIT_LIST_HEAD(&lock->held_list_entry); - lock->name = name; - } + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)lock, sizeof(*lock)); + lock->name = name; } void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task) diff --git a/kernel/rtmutex-debug.h b/kernel/rtmutex-debug.h index 7612fbc62d70..14193d596d78 100644 --- a/kernel/rtmutex-debug.h +++ b/kernel/rtmutex-debug.h @@ -9,20 +9,16 @@ * This file contains macros used solely by rtmutex.c. Debug version. 
*/ -#define __IP_DECL__ , unsigned long ip -#define __IP__ , ip -#define __RET_IP__ , (unsigned long)__builtin_return_address(0) - extern void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task); extern void rt_mutex_deadlock_account_unlock(struct task_struct *task); extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter); extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name); -extern void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__); +extern void debug_rt_mutex_lock(struct rt_mutex *lock); extern void debug_rt_mutex_unlock(struct rt_mutex *lock); extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock, - struct task_struct *powner __IP_DECL__); + struct task_struct *powner); extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock); extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter, struct rt_mutex *lock); diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 45d61016da57..91b699aa658b 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -161,8 +161,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task, int deadlock_detect, struct rt_mutex *orig_lock, struct rt_mutex_waiter *orig_waiter, - struct task_struct *top_task - __IP_DECL__) + struct task_struct *top_task) { struct rt_mutex *lock; struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; @@ -357,7 +356,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock) * * Must be called with lock->wait_lock held. */ -static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__) +static int try_to_take_rt_mutex(struct rt_mutex *lock) { /* * We have to be careful here if the atomic speedups are @@ -384,7 +383,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__) return 0; /* We got the lock. 
*/ - debug_rt_mutex_lock(lock __IP__); + debug_rt_mutex_lock(lock); rt_mutex_set_owner(lock, current, 0); @@ -402,8 +401,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__) */ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, - int detect_deadlock - __IP_DECL__) + int detect_deadlock) { struct rt_mutex_waiter *top_waiter = waiter; task_t *owner = rt_mutex_owner(lock); @@ -454,7 +452,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, spin_unlock(&lock->wait_lock); res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, - current __IP__); + current); spin_lock(&lock->wait_lock); @@ -526,7 +524,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock) * Must be called with lock->wait_lock held */ static void remove_waiter(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter __IP_DECL__) + struct rt_mutex_waiter *waiter) { int first = (waiter == rt_mutex_top_waiter(lock)); int boost = 0; @@ -568,7 +566,7 @@ static void remove_waiter(struct rt_mutex *lock, spin_unlock(&lock->wait_lock); - rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current __IP__); + rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current); spin_lock(&lock->wait_lock); } @@ -595,7 +593,7 @@ void rt_mutex_adjust_pi(struct task_struct *task) get_task_struct(task); spin_unlock_irqrestore(&task->pi_lock, flags); - rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__); + rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task); } /* @@ -604,7 +602,7 @@ void rt_mutex_adjust_pi(struct task_struct *task) static int __sched rt_mutex_slowlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, - int detect_deadlock __IP_DECL__) + int detect_deadlock) { struct rt_mutex_waiter waiter; int ret = 0; @@ -615,7 +613,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, spin_lock(&lock->wait_lock); /* Try to acquire the lock again: */ - if (try_to_take_rt_mutex(lock __IP__)) { + if (try_to_take_rt_mutex(lock)) { spin_unlock(&lock->wait_lock); return 0; } @@ -629,7 +627,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, for (;;) { /* Try to acquire the lock: */ - if (try_to_take_rt_mutex(lock __IP__)) + if (try_to_take_rt_mutex(lock)) break; /* @@ -653,7 +651,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, */ if (!waiter.task) { ret = task_blocks_on_rt_mutex(lock, &waiter, - detect_deadlock __IP__); + detect_deadlock); /* * If we got woken up by the owner then start loop * all over without going into schedule to try @@ -680,7 +678,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, set_current_state(TASK_RUNNING); if (unlikely(waiter.task)) - remove_waiter(lock, &waiter __IP__); + remove_waiter(lock, &waiter); /* * try_to_take_rt_mutex() sets the waiter bit @@ -711,7 +709,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, * Slow path try-lock function: */ static inline int -rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__) +rt_mutex_slowtrylock(struct rt_mutex *lock) { int ret = 0; @@ -719,7 +717,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__) if (likely(rt_mutex_owner(lock) != current)) { - ret = try_to_take_rt_mutex(lock __IP__); + ret = try_to_take_rt_mutex(lock); /* * try_to_take_rt_mutex() sets the lock waiters * bit unconditionally. Clean this up. 
@@ -769,13 +767,13 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state, int detect_deadlock, int (*slowfn)(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, - int detect_deadlock __IP_DECL__)) + int detect_deadlock)) { if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { rt_mutex_deadlock_account_lock(lock, current); return 0; } else - return slowfn(lock, state, NULL, detect_deadlock __RET_IP__); + return slowfn(lock, state, NULL, detect_deadlock); } static inline int @@ -783,24 +781,24 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, int detect_deadlock, int (*slowfn)(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, - int detect_deadlock __IP_DECL__)) + int detect_deadlock)) { if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { rt_mutex_deadlock_account_lock(lock, current); return 0; } else - return slowfn(lock, state, timeout, detect_deadlock __RET_IP__); + return slowfn(lock, state, timeout, detect_deadlock); } static inline int rt_mutex_fasttrylock(struct rt_mutex *lock, - int (*slowfn)(struct rt_mutex *lock __IP_DECL__)) + int (*slowfn)(struct rt_mutex *lock)) { if (likely(rt_mutex_cmpxchg(lock, NULL, current))) { rt_mutex_deadlock_account_lock(lock, current); return 1; } - return slowfn(lock __RET_IP__); + return slowfn(lock); } static inline void @@ -948,7 +946,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner) { __rt_mutex_init(lock, NULL); - debug_rt_mutex_proxy_lock(lock, proxy_owner __RET_IP__); + debug_rt_mutex_proxy_lock(lock, proxy_owner); rt_mutex_set_owner(lock, proxy_owner, 0); rt_mutex_deadlock_account_lock(lock, proxy_owner); } diff --git a/kernel/rtmutex.h b/kernel/rtmutex.h index 1e0fca13ff72..a1a1dd06421d 100644 --- a/kernel/rtmutex.h +++ b/kernel/rtmutex.h @@ -10,9 +10,6 @@ * Non-debug version. */ -#define __IP_DECL__ -#define __IP__ -#define __RET_IP__ #define rt_mutex_deadlock_check(l) (0) #define rt_mutex_deadlock_account_lock(m, t) do { } while (0) #define rt_mutex_deadlock_account_unlock(l) do { } while (0) diff --git a/kernel/sched.c b/kernel/sched.c index d5e37072ea54..48c1faa60a67 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -3142,12 +3143,13 @@ void fastcall add_preempt_count(int val) /* * Underflow? */ - BUG_ON((preempt_count() < 0)); + if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) + return; preempt_count() += val; /* * Spinlock count overflowing soon? */ - BUG_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10); + DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10); } EXPORT_SYMBOL(add_preempt_count); @@ -3156,11 +3158,15 @@ void fastcall sub_preempt_count(int val) /* * Underflow? */ - BUG_ON(val > preempt_count()); + if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) + return; /* * Is the spinlock portion underflowing? 
*/ - BUG_ON((val < PREEMPT_MASK) && !(preempt_count() & PREEMPT_MASK)); + if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && + !(preempt_count() & PREEMPT_MASK))) + return; + preempt_count() -= val; } EXPORT_SYMBOL(sub_preempt_count); @@ -4690,7 +4696,7 @@ void show_state(void) } while_each_thread(g, p); read_unlock(&tasklist_lock); - mutex_debug_show_all_locks(); + debug_show_all_locks(); } /** diff --git a/lib/Makefile b/lib/Makefile index 10c13c9d7824..4f5d01922f82 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -11,7 +11,7 @@ lib-$(CONFIG_SMP) += cpumask.o lib-y += kobject.o kref.o kobject_uevent.o klist.o -obj-y += sort.o parser.o halfmd4.o iomap_copy.o +obj-y += sort.o parser.o halfmd4.o iomap_copy.o debug_locks.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG diff --git a/lib/debug_locks.c b/lib/debug_locks.c new file mode 100644 index 000000000000..0ef01d14727c --- /dev/null +++ b/lib/debug_locks.c @@ -0,0 +1,45 @@ +/* + * lib/debug_locks.c + * + * Generic place for common debugging facilities for various locks: + * spinlocks, rwlocks, mutexes and rwsems. + * + * Started by Ingo Molnar: + * + * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar + */ +#include +#include +#include +#include +#include + +/* + * We want to turn all lock-debugging facilities on/off at once, + * via a global flag. The reason is that once a single bug has been + * detected and reported, there might be cascade of followup bugs + * that would just muddy the log. So we report the first one and + * shut up after that. + */ +int debug_locks = 1; + +/* + * The locking-testsuite uses to get a + * 'silent failure': nothing is printed to the console when + * a locking bug is detected. + */ +int debug_locks_silent; + +/* + * Generic 'turn off all lock debugging' function: + */ +int debug_locks_off(void) +{ + if (xchg(&debug_locks, 0)) { + if (!debug_locks_silent) { + console_verbose(); + return 1; + } + } + return 0; +} diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 93c15ee3f8ea..3de2ccf48ac6 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -8,38 +8,35 @@ #include #include +#include #include +#include static void spin_bug(spinlock_t *lock, const char *msg) { - static long print_once = 1; struct task_struct *owner = NULL; - if (xchg(&print_once, 0)) { - if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT) - owner = lock->owner; - printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n", - msg, raw_smp_processor_id(), - current->comm, current->pid); - printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, " - ".owner_cpu: %d\n", - lock, lock->magic, - owner ? owner->comm : "", - owner ? owner->pid : -1, - lock->owner_cpu); - dump_stack(); -#ifdef CONFIG_SMP - /* - * We cannot continue on SMP: - */ -// panic("bad locking"); -#endif - } + if (!debug_locks_off()) + return; + + if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT) + owner = lock->owner; + printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n", + msg, raw_smp_processor_id(), + current->comm, current->pid); + printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, " + ".owner_cpu: %d\n", + lock, lock->magic, + owner ? owner->comm : "", + owner ? 
owner->pid : -1, + lock->owner_cpu); + dump_stack(); } #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) -static inline void debug_spin_lock_before(spinlock_t *lock) +static inline void +debug_spin_lock_before(spinlock_t *lock) { SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); SPIN_BUG_ON(lock->owner == current, lock, "recursion"); @@ -118,20 +115,13 @@ void _raw_spin_unlock(spinlock_t *lock) static void rwlock_bug(rwlock_t *lock, const char *msg) { - static long print_once = 1; - - if (xchg(&print_once, 0)) { - printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n", - msg, raw_smp_processor_id(), current->comm, - current->pid, lock); - dump_stack(); -#ifdef CONFIG_SMP - /* - * We cannot continue on SMP: - */ - panic("bad locking"); -#endif - } + if (!debug_locks_off()) + return; + + printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n", + msg, raw_smp_processor_id(), current->comm, + current->pid, lock); + dump_stack(); } #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 35f8553f893a..7b450798b458 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -330,6 +330,8 @@ void __vunmap(void *addr, int deallocate_pages) return; } + debug_check_no_locks_freed(addr, area->size); + if (deallocate_pages) { int i; -- cgit v1.2.3 From e4d919188554a77c798a267e098059bc9aa39726 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:24:34 -0700 Subject: [PATCH] lockdep: locking init debugging improvement Locking init improvement: - introduce and use __SPIN_LOCK_UNLOCKED for array initializations, to pass in the name string of locks, used by debugging Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/random.c | 6 +++--- fs/dcache.c | 2 +- include/linux/idr.h | 2 +- include/linux/init_task.h | 10 +++++----- include/linux/notifier.h | 2 +- include/linux/seqlock.h | 12 ++++++++++-- include/linux/spinlock_types.h | 15 +++++++++------ include/linux/wait.h | 2 +- kernel/rcupdate.c | 4 ++-- kernel/timer.c | 2 +- mm/swap_state.c | 2 +- net/ipv4/tcp_ipv4.c | 2 +- net/ipv4/tcp_minisocks.c | 2 +- 13 files changed, 37 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/random.c b/drivers/char/random.c index 164bddae047f..4c3a5ca9d8f7 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -416,7 +416,7 @@ static struct entropy_store input_pool = { .poolinfo = &poolinfo_table[0], .name = "input", .limit = 1, - .lock = SPIN_LOCK_UNLOCKED, + .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock), .pool = input_pool_data }; @@ -425,7 +425,7 @@ static struct entropy_store blocking_pool = { .name = "blocking", .limit = 1, .pull = &input_pool, - .lock = SPIN_LOCK_UNLOCKED, + .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock), .pool = blocking_pool_data }; @@ -433,7 +433,7 @@ static struct entropy_store nonblocking_pool = { .poolinfo = &poolinfo_table[1], .name = "nonblocking", .pull = &input_pool, - .lock = SPIN_LOCK_UNLOCKED, + .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock), .pool = nonblocking_pool_data }; diff --git a/fs/dcache.c b/fs/dcache.c index c6e3535be192..bec4de176c81 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -38,7 +38,7 @@ int sysctl_vfs_cache_pressure __read_mostly = 100; EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock); -static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED; 
+static __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); EXPORT_SYMBOL(dcache_lock); diff --git a/include/linux/idr.h b/include/linux/idr.h index f559a719dbe8..826803449db7 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -66,7 +66,7 @@ struct idr { .id_free = NULL, \ .layers = 0, \ .id_free_cnt = 0, \ - .lock = SPIN_LOCK_UNLOCKED, \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ } #define DEFINE_IDR(name) struct idr name = IDR_INIT(name) diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 678c1a90380d..1b7bb37624bb 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -21,7 +21,7 @@ .count = ATOMIC_INIT(1), \ .fdt = &init_files.fdtab, \ .fdtab = INIT_FDTABLE, \ - .file_lock = SPIN_LOCK_UNLOCKED, \ + .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock), \ .next_fd = 0, \ .close_on_exec_init = { { 0, } }, \ .open_fds_init = { { 0, } }, \ @@ -36,7 +36,7 @@ .user_id = 0, \ .next = NULL, \ .wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait), \ - .ctx_lock = SPIN_LOCK_UNLOCKED, \ + .ctx_lock = __SPIN_LOCK_UNLOCKED(name.ctx_lock), \ .reqs_active = 0U, \ .max_reqs = ~0U, \ } @@ -48,7 +48,7 @@ .mm_users = ATOMIC_INIT(2), \ .mm_count = ATOMIC_INIT(1), \ .mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem), \ - .page_table_lock = SPIN_LOCK_UNLOCKED, \ + .page_table_lock = __SPIN_LOCK_UNLOCKED(name.page_table_lock), \ .mmlist = LIST_HEAD_INIT(name.mmlist), \ .cpu_vm_mask = CPU_MASK_ALL, \ } @@ -69,7 +69,7 @@ #define INIT_SIGHAND(sighand) { \ .count = ATOMIC_INIT(1), \ .action = { { { .sa_handler = NULL, } }, }, \ - .siglock = SPIN_LOCK_UNLOCKED, \ + .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \ } extern struct group_info init_groups; @@ -119,7 +119,7 @@ extern struct group_info init_groups; .list = LIST_HEAD_INIT(tsk.pending.list), \ .signal = {{0}}}, \ .blocked = {{0}}, \ - .alloc_lock = SPIN_LOCK_UNLOCKED, \ + .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ .journal_info = NULL, \ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 51dbab9710c7..7ff386a6ae87 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -65,7 +65,7 @@ struct raw_notifier_head { } while (0) #define ATOMIC_NOTIFIER_INIT(name) { \ - .lock = SPIN_LOCK_UNLOCKED, \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ .head = NULL } #define BLOCKING_NOTIFIER_INIT(name) { \ .rwsem = __RWSEM_INITIALIZER((name).rwsem), \ diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 7bc5c7c12b54..46000936f8f1 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -38,9 +38,17 @@ typedef struct { * These macros triggered gcc-3.x compile-time problems. We think these are * OK now. Be cautious. */ -#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED } -#define seqlock_init(x) do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0) +#define __SEQLOCK_UNLOCKED(lockname) \ + { 0, __SPIN_LOCK_UNLOCKED(lockname) } +#define SEQLOCK_UNLOCKED \ + __SEQLOCK_UNLOCKED(old_style_seqlock_init) + +#define seqlock_init(x) \ + do { *(x) = (seqlock_t) __SEQLOCK_UNLOCKED(x); } while (0) + +#define DEFINE_SEQLOCK(x) \ + seqlock_t x = __SEQLOCK_UNLOCKED(x) /* Lock out other writers and update the count. * Acts like a normal spin_lock/unlock. 
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 9cb51e070390..f5d4ed7bc785 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -44,24 +44,27 @@ typedef struct { #define SPINLOCK_OWNER_INIT ((void *)-1L) #ifdef CONFIG_DEBUG_SPINLOCK -# define SPIN_LOCK_UNLOCKED \ +# define __SPIN_LOCK_UNLOCKED(lockname) \ (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ .magic = SPINLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ .owner_cpu = -1 } -#define RW_LOCK_UNLOCKED \ +#define __RW_LOCK_UNLOCKED(lockname) \ (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ .magic = RWLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ .owner_cpu = -1 } #else -# define SPIN_LOCK_UNLOCKED \ +# define __SPIN_LOCK_UNLOCKED(lockname) \ (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED } -#define RW_LOCK_UNLOCKED \ +#define __RW_LOCK_UNLOCKED(lockname) \ (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED } #endif -#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED -#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED +#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) +#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) + +#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) +#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) #endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/include/linux/wait.h b/include/linux/wait.h index 544e855c7c02..bc4f389c49bb 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -68,7 +68,7 @@ struct task_struct; wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk) #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ - .lock = SPIN_LOCK_UNLOCKED, \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ .task_list = { &(name).task_list, &(name).task_list } } #define DECLARE_WAIT_QUEUE_HEAD(name) \ diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index f464f5ae3f11..759805c9859a 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -53,13 +53,13 @@ static struct rcu_ctrlblk rcu_ctrlblk = { .cur = -300, .completed = -300, - .lock = SPIN_LOCK_UNLOCKED, + .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), .cpumask = CPU_MASK_NONE, }; static struct rcu_ctrlblk rcu_bh_ctrlblk = { .cur = -300, .completed = -300, - .lock = SPIN_LOCK_UNLOCKED, + .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), .cpumask = CPU_MASK_NONE, }; diff --git a/kernel/timer.c b/kernel/timer.c index 5a8960253063..4dd9a10d67d0 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1208,7 +1208,7 @@ unsigned long wall_jiffies = INITIAL_JIFFIES; * playing with xtime and avenrun. 
*/ #ifndef ARCH_HAVE_XTIME_LOCK -seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED; +__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock); EXPORT_SYMBOL(xtime_lock); #endif diff --git a/mm/swap_state.c b/mm/swap_state.c index fccbd9bba77b..5f7cf2a4cb55 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -38,7 +38,7 @@ static struct backing_dev_info swap_backing_dev_info = { struct address_space swapper_space = { .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN), - .tree_lock = RW_LOCK_UNLOCKED, + .tree_lock = __RW_LOCK_UNLOCKED(swapper_space.tree_lock), .a_ops = &swap_aops, .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear), .backing_dev_info = &swap_backing_dev_info, diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 8355b729fa95..823717285c6d 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -90,7 +90,7 @@ static struct socket *tcp_socket; void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { - .lhash_lock = RW_LOCK_UNLOCKED, + .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock), .lhash_users = ATOMIC_INIT(0), .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait), }; diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index e0851697ad5e..0ccb7cb22b15 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -40,7 +40,7 @@ int sysctl_tcp_abort_on_overflow; struct inet_timewait_death_row tcp_death_row = { .sysctl_max_tw_buckets = NR_FILE * 2, .period = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS, - .death_lock = SPIN_LOCK_UNLOCKED, + .death_lock = __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock), .hashinfo = &tcp_hashinfo, .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, (unsigned long)&tcp_death_row), -- cgit v1.2.3 From 8637c09901049f061b94f684915d4f18ecf91d79 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:24:38 -0700 Subject: [PATCH] lockdep: stacktrace subsystem, core Framework to generate and save stacktraces quickly, without printing anything to the console. 
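As a rough usage sketch (not part of this patch; the buffer size and function name below are made up), a caller fills in a struct stack_trace pointing at its own entries[] array, hands it to save_stack_trace(), and can later dump it with print_stack_trace():

	#include <linux/kernel.h>
	#include <linux/stacktrace.h>

	static unsigned long my_entries[16];	/* hypothetical scratch buffer */

	static void my_dump_current_backtrace(void)
	{
		struct stack_trace trace = {
			.nr_entries	= 0,
			.max_entries	= ARRAY_SIZE(my_entries),
			.entries	= my_entries,
		};

		/* task == NULL means the current task; skip no frames */
		save_stack_trace(&trace, NULL, 0, 0);

		/* print one symbol per line, minimally indented */
		print_stack_trace(&trace, 0);
	}

This is the same pattern the later lockdep patch uses in its save_trace() helper, except that lockdep points ->entries into one large shared array instead of a per-caller buffer.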
Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/stacktrace.h | 20 ++++++++++++++++++++ kernel/Makefile | 1 + kernel/stacktrace.c | 24 ++++++++++++++++++++++++ lib/Kconfig.debug | 6 +++++- 4 files changed, 50 insertions(+), 1 deletion(-) create mode 100644 include/linux/stacktrace.h create mode 100644 kernel/stacktrace.c (limited to 'include/linux') diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h new file mode 100644 index 000000000000..9cc81e572224 --- /dev/null +++ b/include/linux/stacktrace.h @@ -0,0 +1,20 @@ +#ifndef __LINUX_STACKTRACE_H +#define __LINUX_STACKTRACE_H + +#ifdef CONFIG_STACKTRACE +struct stack_trace { + unsigned int nr_entries, max_entries; + unsigned long *entries; +}; + +extern void save_stack_trace(struct stack_trace *trace, + struct task_struct *task, int all_contexts, + unsigned int skip); + +extern void print_stack_trace(struct stack_trace *trace, int spaces); +#else +# define save_stack_trace(trace, task, all, skip) do { } while (0) +# define print_stack_trace(trace) do { } while (0) +#endif + +#endif diff --git a/kernel/Makefile b/kernel/Makefile index 82fb182f6f61..e7fd20e70f1b 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -10,6 +10,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ hrtimer.o +obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += time/ obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o obj-$(CONFIG_FUTEX) += futex.o diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c new file mode 100644 index 000000000000..b71816e47a30 --- /dev/null +++ b/kernel/stacktrace.c @@ -0,0 +1,24 @@ +/* + * kernel/stacktrace.c + * + * Stack trace management functions + * + * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar + */ +#include +#include +#include + +void print_stack_trace(struct stack_trace *trace, int spaces) +{ + int i, j; + + for (i = 0; i < trace->nr_entries; i++) { + unsigned long ip = trace->entries[i]; + + for (j = 0; j < spaces + 1; j++) + printk(" "); + print_ip_sym(ip); + } +} + diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 7b3863d4f5e8..04e374c6fd46 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -107,7 +107,7 @@ config DEBUG_SLAB_LEAK config DEBUG_PREEMPT bool "Debug preemptible kernel" - depends on DEBUG_KERNEL && PREEMPT + depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT default y help If you say Y here then the kernel will use a debug variant of the @@ -149,6 +149,10 @@ config DEBUG_SPINLOCK_SLEEP If you say Y here, various routines which may sleep will become very noisy if they are called with a spinlock held. +config STACKTRACE + bool + depends on STACKTRACE_SUPPORT + config DEBUG_KOBJECT bool "kobject debugging" depends on DEBUG_KERNEL -- cgit v1.2.3 From de30a2b355ea85350ca2f58f3b9bf4e5bc007986 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:24:42 -0700 Subject: [PATCH] lockdep: irqtrace subsystem, core Accurate hard-IRQ-flags and softirq-flags state tracing. This allows us to attach extra functionality to IRQ flags on/off events (such as trace-on/off). 
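As a minimal illustration (a hypothetical driver-style snippet, not code from this patch), callers of the existing local_irq_save()/local_irq_restore() APIs need no changes; with CONFIG_TRACE_IRQFLAGS the wrappers added below additionally record the off/on transitions per task:

	#include <linux/irqflags.h>

	static unsigned int my_event_count;	/* hypothetical state */

	static void my_count_event(void)
	{
		unsigned long flags;

		/*
		 * With CONFIG_TRACE_IRQFLAGS this expands to
		 * raw_local_irq_save(flags); trace_hardirqs_off();
		 * so the disable is traced at this call site.
		 */
		local_irq_save(flags);
		my_event_count++;
		/*
		 * local_irq_restore() calls trace_hardirqs_on() only if
		 * the saved flags actually re-enable interrupts, keeping
		 * the per-task hardirqs_enabled state accurate.
		 */
		local_irq_restore(flags);
	}
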
Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/kernel/irq.c | 2 +- include/asm-powerpc/irqflags.h | 31 ++++++++++ include/linux/hardirq.h | 26 +++++++- include/linux/init_task.h | 2 + include/linux/interrupt.h | 11 ++-- include/linux/irqflags.h | 96 +++++++++++++++++++++++++++++ include/linux/sched.h | 15 +++++ kernel/fork.c | 19 ++++++ kernel/sched.c | 4 +- kernel/softirq.c | 137 +++++++++++++++++++++++++++++++++++------ 10 files changed, 313 insertions(+), 30 deletions(-) create mode 100644 include/asm-powerpc/irqflags.h create mode 100644 include/linux/irqflags.h (limited to 'include/linux') diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 525baab45d2d..027728b95429 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -429,7 +429,7 @@ void do_softirq(void) local_bh_disable(); do_softirq_onstack(); account_system_vtime(current); - __local_bh_enable(); + _local_bh_enable(); } local_irq_restore(flags); diff --git a/include/asm-powerpc/irqflags.h b/include/asm-powerpc/irqflags.h new file mode 100644 index 000000000000..7970cbaeaa54 --- /dev/null +++ b/include/asm-powerpc/irqflags.h @@ -0,0 +1,31 @@ +/* + * include/asm-powerpc/irqflags.h + * + * IRQ flags handling + * + * This file gets included from lowlevel asm headers too, to provide + * wrapped versions of the local_irq_*() APIs, based on the + * raw_local_irq_*() macros from the lowlevel headers. + */ +#ifndef _ASM_IRQFLAGS_H +#define _ASM_IRQFLAGS_H + +/* + * Get definitions for raw_local_save_flags(x), etc. + */ +#include + +/* + * Do the CPU's IRQ-state tracing from assembly code. We call a + * C function, so save all the C-clobbered registers: + */ +#ifdef CONFIG_TRACE_IRQFLAGS + +#error No support on PowerPC yet for CONFIG_TRACE_IRQFLAGS + +#else +# define TRACE_IRQS_ON +# define TRACE_IRQS_OFF +#endif + +#endif diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 114ae583cca9..b1d4332b5cf0 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -86,9 +86,6 @@ extern void synchronize_irq(unsigned int irq); # define synchronize_irq(irq) barrier() #endif -#define nmi_enter() irq_enter() -#define nmi_exit() sub_preempt_count(HARDIRQ_OFFSET) - struct task_struct; #ifndef CONFIG_VIRT_CPU_ACCOUNTING @@ -97,12 +94,35 @@ static inline void account_system_vtime(struct task_struct *tsk) } #endif +/* + * It is safe to do non-atomic ops on ->hardirq_context, + * because NMI handlers may not preempt and the ops are + * always balanced, so the interrupted value of ->hardirq_context + * will always be restored. 
+ */ #define irq_enter() \ do { \ account_system_vtime(current); \ add_preempt_count(HARDIRQ_OFFSET); \ + trace_hardirq_enter(); \ + } while (0) + +/* + * Exit irq context without processing softirqs: + */ +#define __irq_exit() \ + do { \ + trace_hardirq_exit(); \ + account_system_vtime(current); \ + sub_preempt_count(HARDIRQ_OFFSET); \ } while (0) +/* + * Exit irq context and process softirqs if needed: + */ extern void irq_exit(void); +#define nmi_enter() irq_enter() +#define nmi_exit() __irq_exit() + #endif /* LINUX_HARDIRQ_H */ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 1b7bb37624bb..444a3ae0de2a 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -3,6 +3,7 @@ #include #include +#include #define INIT_FDTABLE \ { \ @@ -124,6 +125,7 @@ extern struct group_info init_groups; .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ .pi_lock = SPIN_LOCK_UNLOCKED, \ + INIT_TRACE_IRQFLAGS \ } diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 73463fbb38e4..d5afee95fd43 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -199,13 +200,11 @@ static inline void __deprecated save_and_cli(unsigned long *x) #define save_and_cli(x) save_and_cli(&x) #endif /* CONFIG_SMP */ -/* SoftIRQ primitives. */ -#define local_bh_disable() \ - do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0) -#define __local_bh_enable() \ - do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0) - +extern void local_bh_disable(void); +extern void __local_bh_enable(void); +extern void _local_bh_enable(void); extern void local_bh_enable(void); +extern void local_bh_enable_ip(unsigned long ip); /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high frequency threaded job scheduling. For almost all the purposes diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h new file mode 100644 index 000000000000..412e025bc5c7 --- /dev/null +++ b/include/linux/irqflags.h @@ -0,0 +1,96 @@ +/* + * include/linux/irqflags.h + * + * IRQ flags tracing: follow the state of the hardirq and softirq flags and + * provide callbacks for transitions between ON and OFF states. + * + * This file gets included from lowlevel asm headers too, to provide + * wrapped versions of the local_irq_*() APIs, based on the + * raw_local_irq_*() macros from the lowlevel headers. 
+ */ +#ifndef _LINUX_TRACE_IRQFLAGS_H +#define _LINUX_TRACE_IRQFLAGS_H + +#ifdef CONFIG_TRACE_IRQFLAGS + extern void trace_hardirqs_on(void); + extern void trace_hardirqs_off(void); + extern void trace_softirqs_on(unsigned long ip); + extern void trace_softirqs_off(unsigned long ip); +# define trace_hardirq_context(p) ((p)->hardirq_context) +# define trace_softirq_context(p) ((p)->softirq_context) +# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled) +# define trace_softirqs_enabled(p) ((p)->softirqs_enabled) +# define trace_hardirq_enter() do { current->hardirq_context++; } while (0) +# define trace_hardirq_exit() do { current->hardirq_context--; } while (0) +# define trace_softirq_enter() do { current->softirq_context++; } while (0) +# define trace_softirq_exit() do { current->softirq_context--; } while (0) +# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, +#else +# define trace_hardirqs_on() do { } while (0) +# define trace_hardirqs_off() do { } while (0) +# define trace_softirqs_on(ip) do { } while (0) +# define trace_softirqs_off(ip) do { } while (0) +# define trace_hardirq_context(p) 0 +# define trace_softirq_context(p) 0 +# define trace_hardirqs_enabled(p) 0 +# define trace_softirqs_enabled(p) 0 +# define trace_hardirq_enter() do { } while (0) +# define trace_hardirq_exit() do { } while (0) +# define trace_softirq_enter() do { } while (0) +# define trace_softirq_exit() do { } while (0) +# define INIT_TRACE_IRQFLAGS +#endif + +#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT + +#include + +#define local_irq_enable() \ + do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) +#define local_irq_disable() \ + do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) +#define local_irq_save(flags) \ + do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0) + +#define local_irq_restore(flags) \ + do { \ + if (raw_irqs_disabled_flags(flags)) { \ + raw_local_irq_restore(flags); \ + trace_hardirqs_off(); \ + } else { \ + trace_hardirqs_on(); \ + raw_local_irq_restore(flags); \ + } \ + } while (0) +#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ +/* + * The local_irq_*() APIs are equal to the raw_local_irq*() + * if !TRACE_IRQFLAGS. 
+ */ +# define raw_local_irq_disable() local_irq_disable() +# define raw_local_irq_enable() local_irq_enable() +# define raw_local_irq_save(flags) local_irq_save(flags) +# define raw_local_irq_restore(flags) local_irq_restore(flags) +#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ + +#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT +#define safe_halt() \ + do { \ + trace_hardirqs_on(); \ + raw_safe_halt(); \ + } while (0) + +#define local_save_flags(flags) raw_local_save_flags(flags) + +#define irqs_disabled() \ +({ \ + unsigned long flags; \ + \ + raw_local_save_flags(flags); \ + raw_irqs_disabled_flags(flags); \ +}) + +#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) +#endif /* CONFIG_X86 */ + +#endif diff --git a/include/linux/sched.h b/include/linux/sched.h index bdabeee10a78..ad7a89014d29 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -871,6 +871,21 @@ struct task_struct { /* mutex deadlock detection */ struct mutex_waiter *blocked_on; #endif +#ifdef CONFIG_TRACE_IRQFLAGS + unsigned int irq_events; + int hardirqs_enabled; + unsigned long hardirq_enable_ip; + unsigned int hardirq_enable_event; + unsigned long hardirq_disable_ip; + unsigned int hardirq_disable_event; + int softirqs_enabled; + unsigned long softirq_disable_ip; + unsigned int softirq_disable_event; + unsigned long softirq_enable_ip; + unsigned int softirq_enable_event; + int hardirq_context; + int softirq_context; +#endif /* journalling filesystem info */ void *journal_info; diff --git a/kernel/fork.c b/kernel/fork.c index 1cd46a4fb0d3..b7db7fb74f53 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -968,6 +968,10 @@ static task_t *copy_process(unsigned long clone_flags, if (!p) goto fork_out; +#ifdef CONFIG_TRACE_IRQFLAGS + DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); + DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); +#endif retval = -EAGAIN; if (atomic_read(&p->user->processes) >= p->signal->rlim[RLIMIT_NPROC].rlim_cur) { @@ -1042,6 +1046,21 @@ static task_t *copy_process(unsigned long clone_flags, } mpol_fix_fork_child_flag(p); #endif +#ifdef CONFIG_TRACE_IRQFLAGS + p->irq_events = 0; + p->hardirqs_enabled = 0; + p->hardirq_enable_ip = 0; + p->hardirq_enable_event = 0; + p->hardirq_disable_ip = _THIS_IP_; + p->hardirq_disable_event = 0; + p->softirqs_enabled = 1; + p->softirq_enable_ip = _THIS_IP_; + p->softirq_enable_event = 0; + p->softirq_disable_ip = 0; + p->softirq_disable_event = 0; + p->hardirq_context = 0; + p->softirq_context = 0; +#endif rt_mutex_init_task(p); diff --git a/kernel/sched.c b/kernel/sched.c index 48c1faa60a67..911829966534 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4462,7 +4462,9 @@ int __sched cond_resched_softirq(void) BUG_ON(!in_softirq()); if (need_resched() && __resched_legal()) { - __local_bh_enable(); + raw_local_irq_disable(); + _local_bh_enable(); + raw_local_irq_enable(); __cond_resched(); local_bh_disable(); return 1; diff --git a/kernel/softirq.c b/kernel/softirq.c index 8f03e3b89b55..584609b6a66e 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -61,6 +61,119 @@ static inline void wakeup_softirqd(void) wake_up_process(tsk); } +/* + * This one is for softirq.c-internal use, + * where hardirqs are disabled legitimately: + */ +static void __local_bh_disable(unsigned long ip) +{ + unsigned long flags; + + WARN_ON_ONCE(in_irq()); + + raw_local_irq_save(flags); + add_preempt_count(SOFTIRQ_OFFSET); + /* + * Were softirqs turned off above: + */ + if (softirq_count() == SOFTIRQ_OFFSET) + trace_softirqs_off(ip); + raw_local_irq_restore(flags); +} + +void 
local_bh_disable(void) +{ + __local_bh_disable((unsigned long)__builtin_return_address(0)); +} + +EXPORT_SYMBOL(local_bh_disable); + +void __local_bh_enable(void) +{ + WARN_ON_ONCE(in_irq()); + + /* + * softirqs should never be enabled by __local_bh_enable(), + * it always nests inside local_bh_enable() sections: + */ + WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET); + + sub_preempt_count(SOFTIRQ_OFFSET); +} +EXPORT_SYMBOL_GPL(__local_bh_enable); + +/* + * Special-case - softirqs can safely be enabled in + * cond_resched_softirq(), or by __do_softirq(), + * without processing still-pending softirqs: + */ +void _local_bh_enable(void) +{ + WARN_ON_ONCE(in_irq()); + WARN_ON_ONCE(!irqs_disabled()); + + if (softirq_count() == SOFTIRQ_OFFSET) + trace_softirqs_on((unsigned long)__builtin_return_address(0)); + sub_preempt_count(SOFTIRQ_OFFSET); +} + +EXPORT_SYMBOL(_local_bh_enable); + +void local_bh_enable(void) +{ + unsigned long flags; + + WARN_ON_ONCE(in_irq()); + WARN_ON_ONCE(irqs_disabled()); + + local_irq_save(flags); + /* + * Are softirqs going to be turned on now: + */ + if (softirq_count() == SOFTIRQ_OFFSET) + trace_softirqs_on((unsigned long)__builtin_return_address(0)); + /* + * Keep preemption disabled until we are done with + * softirq processing: + */ + sub_preempt_count(SOFTIRQ_OFFSET - 1); + + if (unlikely(!in_interrupt() && local_softirq_pending())) + do_softirq(); + + dec_preempt_count(); + local_irq_restore(flags); + preempt_check_resched(); +} +EXPORT_SYMBOL(local_bh_enable); + +void local_bh_enable_ip(unsigned long ip) +{ + unsigned long flags; + + WARN_ON_ONCE(in_irq()); + + local_irq_save(flags); + /* + * Are softirqs going to be turned on now: + */ + if (softirq_count() == SOFTIRQ_OFFSET) + trace_softirqs_on(ip); + /* + * Keep preemption disabled until we are done with + * softirq processing: + */ + sub_preempt_count(SOFTIRQ_OFFSET - 1); + + if (unlikely(!in_interrupt() && local_softirq_pending())) + do_softirq(); + + dec_preempt_count(); + local_irq_restore(flags); + preempt_check_resched(); +} +EXPORT_SYMBOL(local_bh_enable_ip); + /* * We restart softirq processing MAX_SOFTIRQ_RESTART times, * and we fall back to softirqd after that. 
@@ -80,8 +193,9 @@ asmlinkage void __do_softirq(void) int cpu; pending = local_softirq_pending(); + __local_bh_disable((unsigned long)__builtin_return_address(0)); + trace_softirq_enter(); - local_bh_disable(); cpu = smp_processor_id(); restart: /* Reset the pending bitmask before enabling irqs */ @@ -109,7 +223,8 @@ restart: if (pending) wakeup_softirqd(); - __local_bh_enable(); + trace_softirq_exit(); + _local_bh_enable(); } #ifndef __ARCH_HAS_DO_SOFTIRQ @@ -136,23 +251,6 @@ EXPORT_SYMBOL(do_softirq); #endif -void local_bh_enable(void) -{ - WARN_ON(irqs_disabled()); - /* - * Keep preemption disabled until we are done with - * softirq processing: - */ - sub_preempt_count(SOFTIRQ_OFFSET - 1); - - if (unlikely(!in_interrupt() && local_softirq_pending())) - do_softirq(); - - dec_preempt_count(); - preempt_check_resched(); -} -EXPORT_SYMBOL(local_bh_enable); - #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED # define invoke_softirq() __do_softirq() #else @@ -165,6 +263,7 @@ EXPORT_SYMBOL(local_bh_enable); void irq_exit(void) { account_system_vtime(current); + trace_hardirq_exit(); sub_preempt_count(IRQ_EXIT_OFFSET); if (!in_interrupt() && local_softirq_pending()) invoke_softirq(); -- cgit v1.2.3 From fbb9ce9530fd9b66096d5187fa6a115d16d9746c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:24:50 -0700 Subject: [PATCH] lockdep: core Do 'make oldconfig' and accept all the defaults for new config options - reboot into the kernel and if everything goes well it should boot up fine and you should have /proc/lockdep and /proc/lockdep_stats files. Typically if the lock validator finds some problem it will print out voluminous debug output that begins with "BUG: ..." and which syslog output can be used by kernel developers to figure out the precise locking scenario. What does the lock validator do? It "observes" and maps all locking rules as they occur dynamically (as triggered by the kernel's natural use of spinlocks, rwlocks, mutexes and rwsems). Whenever the lock validator subsystem detects a new locking scenario, it validates this new rule against the existing set of rules. If this new rule is consistent with the existing set of rules then the new rule is added transparently and the kernel continues as normal. If the new rule could create a deadlock scenario then this condition is printed out. When determining validity of locking, all possible "deadlock scenarios" are considered: assuming arbitrary number of CPUs, arbitrary irq context and task context constellations, running arbitrary combinations of all the existing locking scenarios. In a typical system this means millions of separate scenarios. This is why we call it a "locking correctness" validator - for all rules that are observed the lock validator proves it with mathematical certainty that a deadlock could not occur (assuming that the lock validator implementation itself is correct and its internal data structures are not corrupted by some other kernel subsystem). [see more details and conditionals of this statement in include/linux/lockdep.h and Documentation/lockdep-design.txt] Furthermore, this "all possible scenarios" property of the validator also enables the finding of complex, highly unlikely multi-CPU multi-context races via single single-context rules, increasing the likelyhood of finding bugs drastically. In practical terms: the lock validator already found a bug in the upstream kernel that could only occur on systems with 3 or more CPUs, and which needed 3 very unlikely code sequences to occur at once on the 3 CPUs. 
That bug was found and reported on a single-CPU system (!). So in essence a race will be found "piecemeal-wise", triggering all the necessary components for the race, without having to reproduce the race scenario itself! In its short existence the lock validator found and reported many bugs before they actually caused a real deadlock. To further increase the efficiency of the validator, the mapping is not per "lock instance", but per "lock-class". For example, all struct inode objects in the kernel have inode->inotify_mutex. If there are 10,000 inodes cached, then there are 10,000 lock objects. But ->inotify_mutex is a single "lock type", and all locking activities that occur against ->inotify_mutex are "unified" into this single lock-class. The advantage of the lock-class approach is that all historical ->inotify_mutex uses are mapped into a single (and as narrow as possible) set of locking rules - regardless of how many different tasks or inode structures it took to build this set of rules. The set of rules persists during the lifetime of the kernel. To see the rough magnitude of checking that the lock validator does, here's a portion of /proc/lockdep_stats, fresh after bootup:

 lock-classes:                  694 [max: 2048]
 direct dependencies:          1598 [max: 8192]
 indirect dependencies:       17896
 all direct dependencies:     16206
 dependency chains:            1910 [max: 8192]
 in-hardirq chains:              17
 in-softirq chains:             105
 in-process chains:            1065
 stack-trace entries:         38761 [max: 131072]
 combined max dependencies: 2033928
 hardirq-safe locks:             24
 hardirq-unsafe locks:          176
 softirq-safe locks:             53
 softirq-unsafe locks:          137
 irq-safe locks:                 59
 irq-unsafe locks:              176

The lock validator has observed 1598 actual single-thread locking patterns, and has validated all possible 2033928 distinct locking scenarios. 
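To make the lock-class idea concrete, here is a hedged sketch (the structure and key names are hypothetical, and it assumes the spinlock/lockdep glue added by the follow-up patches in this series; this core patch only introduces the lockdep_set_class() interface in lockdep.h below): every instance of a given lock in a structure shares one class, keyed by a static struct lock_class_key:

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	struct my_object {			/* hypothetical object type */
		spinlock_t lock;
	};

	/* one key => one lock-class for all my_object::lock instances */
	static struct lock_class_key my_object_lock_key;

	static void my_object_init(struct my_object *obj)
	{
		spin_lock_init(&obj->lock);
		/*
		 * Whether 10 or 10,000 objects exist, the validator keeps
		 * a single set of locking rules for this class.
		 */
		lockdep_set_class(&obj->lock, &my_object_lock_key);
	}
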
More details about the design of the lock validator can be found in Documentation/lockdep-design.txt, which can also found at: http://redhat.com/~mingo/lockdep-patches/lockdep-design.txt [bunk@stusta.de: cleanups] Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Adrian Bunk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hardirq.h | 5 +- include/linux/init_task.h | 2 + include/linux/lockdep.h | 347 ++++++ include/linux/sched.h | 7 + init/main.c | 14 + kernel/Makefile | 1 + kernel/fork.c | 5 + kernel/irq/manage.c | 6 + kernel/lockdep.c | 2703 ++++++++++++++++++++++++++++++++++++++++++++ kernel/lockdep_internals.h | 78 ++ kernel/module.c | 3 + lib/Kconfig.debug | 2 +- lib/locking-selftest.c | 4 +- 13 files changed, 3171 insertions(+), 6 deletions(-) create mode 100644 include/linux/lockdep.h create mode 100644 kernel/lockdep.c create mode 100644 kernel/lockdep_internals.h (limited to 'include/linux') diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index b1d4332b5cf0..50d8b5744cf6 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -3,6 +3,7 @@ #include #include +#include #include #include @@ -122,7 +123,7 @@ static inline void account_system_vtime(struct task_struct *tsk) */ extern void irq_exit(void); -#define nmi_enter() irq_enter() -#define nmi_exit() __irq_exit() +#define nmi_enter() do { lockdep_off(); irq_enter(); } while (0) +#define nmi_exit() do { __irq_exit(); lockdep_on(); } while (0) #endif /* LINUX_HARDIRQ_H */ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 444a3ae0de2a..60aac2cea0cf 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -4,6 +4,7 @@ #include #include #include +#include #define INIT_FDTABLE \ { \ @@ -126,6 +127,7 @@ extern struct group_info init_groups; .fs_excl = ATOMIC_INIT(0), \ .pi_lock = SPIN_LOCK_UNLOCKED, \ INIT_TRACE_IRQFLAGS \ + INIT_LOCKDEP \ } diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h new file mode 100644 index 000000000000..80ec7a4dbc98 --- /dev/null +++ b/include/linux/lockdep.h @@ -0,0 +1,347 @@ +/* + * Runtime locking correctness validator + * + * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar + * + * see Documentation/lockdep-design.txt for more details. 
+ */ +#ifndef __LINUX_LOCKDEP_H +#define __LINUX_LOCKDEP_H + +#include +#include +#include +#include + +#ifdef CONFIG_LOCKDEP + +/* + * Lock-class usage-state bits: + */ +enum lock_usage_bit +{ + LOCK_USED = 0, + LOCK_USED_IN_HARDIRQ, + LOCK_USED_IN_SOFTIRQ, + LOCK_ENABLED_SOFTIRQS, + LOCK_ENABLED_HARDIRQS, + LOCK_USED_IN_HARDIRQ_READ, + LOCK_USED_IN_SOFTIRQ_READ, + LOCK_ENABLED_SOFTIRQS_READ, + LOCK_ENABLED_HARDIRQS_READ, + LOCK_USAGE_STATES +}; + +/* + * Usage-state bitmasks: + */ +#define LOCKF_USED (1 << LOCK_USED) +#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ) +#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ) +#define LOCKF_ENABLED_HARDIRQS (1 << LOCK_ENABLED_HARDIRQS) +#define LOCKF_ENABLED_SOFTIRQS (1 << LOCK_ENABLED_SOFTIRQS) + +#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS) +#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ) + +#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ) +#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ) +#define LOCKF_ENABLED_HARDIRQS_READ (1 << LOCK_ENABLED_HARDIRQS_READ) +#define LOCKF_ENABLED_SOFTIRQS_READ (1 << LOCK_ENABLED_SOFTIRQS_READ) + +#define LOCKF_ENABLED_IRQS_READ \ + (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ) +#define LOCKF_USED_IN_IRQ_READ \ + (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) + +#define MAX_LOCKDEP_SUBCLASSES 8UL + +/* + * Lock-classes are keyed via unique addresses, by embedding the + * lockclass-key into the kernel (or module) .data section. (For + * static locks we use the lock address itself as the key.) + */ +struct lockdep_subclass_key { + char __one_byte; +} __attribute__ ((__packed__)); + +struct lock_class_key { + struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; +}; + +/* + * The lock-class itself: + */ +struct lock_class { + /* + * class-hash: + */ + struct list_head hash_entry; + + /* + * global list of all lock-classes: + */ + struct list_head lock_entry; + + struct lockdep_subclass_key *key; + unsigned int subclass; + + /* + * IRQ/softirq usage tracking bits: + */ + unsigned long usage_mask; + struct stack_trace usage_traces[LOCK_USAGE_STATES]; + + /* + * These fields represent a directed graph of lock dependencies, + * to every node we attach a list of "forward" and a list of + * "backward" graph nodes. + */ + struct list_head locks_after, locks_before; + + /* + * Generation counter, when doing certain classes of graph walking, + * to ensure that we check one node only once: + */ + unsigned int version; + + /* + * Statistics counter: + */ + unsigned long ops; + + const char *name; + int name_version; +}; + +/* + * Map the lock object (the lock instance) to the lock-class object. + * This is embedded into specific lock instances: + */ +struct lockdep_map { + struct lock_class_key *key; + struct lock_class *class[MAX_LOCKDEP_SUBCLASSES]; + const char *name; +}; + +/* + * Every lock has a list of other locks that were taken after it. + * We only grow the list, never remove from it: + */ +struct lock_list { + struct list_head entry; + struct lock_class *class; + struct stack_trace trace; +}; + +/* + * We record lock dependency chains, so that we can cache them: + */ +struct lock_chain { + struct list_head entry; + u64 chain_key; +}; + +struct held_lock { + /* + * One-way hash of the dependency chain up to this point. We + * hash the hashes step by step as the dependency chain grows. 
+ * + * We use it for dependency-caching and we skip detection + * passes and dependency-updates if there is a cache-hit, so + * it is absolutely critical for 100% coverage of the validator + * to have a unique key value for every unique dependency path + * that can occur in the system, to make a unique hash value + * as likely as possible - hence the 64-bit width. + * + * The task struct holds the current hash value (initialized + * with zero), here we store the previous hash value: + */ + u64 prev_chain_key; + struct lock_class *class; + unsigned long acquire_ip; + struct lockdep_map *instance; + + /* + * The lock-stack is unified in that the lock chains of interrupt + * contexts nest ontop of process context chains, but we 'separate' + * the hashes by starting with 0 if we cross into an interrupt + * context, and we also keep do not add cross-context lock + * dependencies - the lock usage graph walking covers that area + * anyway, and we'd just unnecessarily increase the number of + * dependencies otherwise. [Note: hardirq and softirq contexts + * are separated from each other too.] + * + * The following field is used to detect when we cross into an + * interrupt context: + */ + int irq_context; + int trylock; + int read; + int check; + int hardirqs_off; +}; + +/* + * Initialization, self-test and debugging-output methods: + */ +extern void lockdep_init(void); +extern void lockdep_info(void); +extern void lockdep_reset(void); +extern void lockdep_reset_lock(struct lockdep_map *lock); +extern void lockdep_free_key_range(void *start, unsigned long size); + +extern void lockdep_off(void); +extern void lockdep_on(void); +extern int lockdep_internal(void); + +/* + * These methods are used by specific locking variants (spinlocks, + * rwlocks, mutexes and rwsems) to pass init/acquire/release events + * to lockdep: + */ + +extern void lockdep_init_map(struct lockdep_map *lock, const char *name, + struct lock_class_key *key); + +/* + * Reinitialize a lock key - for cases where there is special locking or + * special initialization of locks so that the validator gets the scope + * of dependencies wrong: they are either too broad (they need a class-split) + * or they are too narrow (they suffer from a false class-split): + */ +#define lockdep_set_class(lock, key) \ + lockdep_init_map(&(lock)->dep_map, #key, key) +#define lockdep_set_class_and_name(lock, key, name) \ + lockdep_init_map(&(lock)->dep_map, name, key) + +/* + * Acquire a lock. + * + * Values for "read": + * + * 0: exclusive (write) acquire + * 1: read-acquire (no recursion allowed) + * 2: read-acquire with same-instance recursion allowed + * + * Values for check: + * + * 0: disabled + * 1: simple checks (freeing, held-at-exit-time, etc.) 
+ * 2: full validation + */ +extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, + int trylock, int read, int check, unsigned long ip); + +extern void lock_release(struct lockdep_map *lock, int nested, + unsigned long ip); + +# define INIT_LOCKDEP .lockdep_recursion = 0, + +#else /* !LOCKDEP */ + +static inline void lockdep_off(void) +{ +} + +static inline void lockdep_on(void) +{ +} + +static inline int lockdep_internal(void) +{ + return 0; +} + +# define lock_acquire(l, s, t, r, c, i) do { } while (0) +# define lock_release(l, n, i) do { } while (0) +# define lockdep_init() do { } while (0) +# define lockdep_info() do { } while (0) +# define lockdep_init_map(lock, name, key) do { (void)(key); } while (0) +# define lockdep_set_class(lock, key) do { (void)(key); } while (0) +# define lockdep_set_class_and_name(lock, key, name) \ + do { (void)(key); } while (0) +# define INIT_LOCKDEP +# define lockdep_reset() do { debug_locks = 1; } while (0) +# define lockdep_free_key_range(start, size) do { } while (0) +/* + * The class key takes no space if lockdep is disabled: + */ +struct lock_class_key { }; +#endif /* !LOCKDEP */ + +#ifdef CONFIG_TRACE_IRQFLAGS +extern void early_boot_irqs_off(void); +extern void early_boot_irqs_on(void); +#else +# define early_boot_irqs_off() do { } while (0) +# define early_boot_irqs_on() do { } while (0) +#endif + +/* + * For trivial one-depth nesting of a lock-class, the following + * global define can be used. (Subsystems with multiple levels + * of nesting should define their own lock-nesting subclasses.) + */ +#define SINGLE_DEPTH_NESTING 1 + +/* + * Map the dependency ops to NOP or to real lockdep ops, depending + * on the per lock-class debug mode: + */ + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# else +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# endif +# define spin_release(l, n, i) lock_release(l, n, i) +#else +# define spin_acquire(l, s, t, i) do { } while (0) +# define spin_release(l, n, i) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i) +# else +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i) +# endif +# define rwlock_release(l, n, i) lock_release(l, n, i) +#else +# define rwlock_acquire(l, s, t, i) do { } while (0) +# define rwlock_acquire_read(l, s, t, i) do { } while (0) +# define rwlock_release(l, n, i) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# else +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# endif +# define mutex_release(l, n, i) lock_release(l, n, i) +#else +# define mutex_acquire(l, s, t, i) do { } while (0) +# define mutex_release(l, n, i) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i) +# else +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i) +# endif +# define rwsem_release(l, n, i) lock_release(l, n, i) +#else +# define rwsem_acquire(l, s, 
t, i) do { } while (0) +# define rwsem_acquire_read(l, s, t, i) do { } while (0) +# define rwsem_release(l, n, i) do { } while (0) +#endif + +#endif /* __LINUX_LOCKDEP_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index ad7a89014d29..8ebddba4448d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -886,6 +886,13 @@ struct task_struct { int hardirq_context; int softirq_context; #endif +#ifdef CONFIG_LOCKDEP +# define MAX_LOCK_DEPTH 30UL + u64 curr_chain_key; + int lockdep_depth; + struct held_lock held_locks[MAX_LOCK_DEPTH]; + unsigned int lockdep_recursion; +#endif /* journalling filesystem info */ void *journal_info; diff --git a/init/main.c b/init/main.c index fc73e1cd8614..fc473d4b56fd 100644 --- a/init/main.c +++ b/init/main.c @@ -48,6 +48,7 @@ #include #include #include +#include #include #include @@ -457,6 +458,15 @@ asmlinkage void __init start_kernel(void) smp_setup_processor_id(); + /* + * Need to run as early as possible, to initialize the + * lockdep hash: + */ + lockdep_init(); + + local_irq_disable(); + early_boot_irqs_off(); + /* * Interrupts are still disabled. Do necessary setups, then * enable them @@ -502,6 +512,7 @@ asmlinkage void __init start_kernel(void) profile_init(); if (!irqs_disabled()) printk("start_kernel(): bug: interrupts were enabled early\n"); + early_boot_irqs_on(); local_irq_enable(); /* @@ -512,6 +523,9 @@ asmlinkage void __init start_kernel(void) console_init(); if (panic_later) panic(panic_later, panic_param); + + lockdep_info(); + /* * Need to run this when irqs are enabled, because it wants * to self-test [hard/soft]-irqs on/off lock inversion bugs diff --git a/kernel/Makefile b/kernel/Makefile index e7fd20e70f1b..049aa79e5c1c 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -13,6 +13,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += time/ obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o +obj-$(CONFIG_LOCKDEP) += lockdep.o obj-$(CONFIG_FUTEX) += futex.o ifeq ($(CONFIG_COMPAT),y) obj-$(CONFIG_FUTEX) += futex_compat.o diff --git a/kernel/fork.c b/kernel/fork.c index b7db7fb74f53..7f48abdd7bb6 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1061,6 +1061,11 @@ static task_t *copy_process(unsigned long clone_flags, p->hardirq_context = 0; p->softirq_context = 0; #endif +#ifdef CONFIG_LOCKDEP + p->lockdep_depth = 0; /* no locks held yet */ + p->curr_chain_key = 0; + p->lockdep_recursion = 0; +#endif rt_mutex_init_task(p); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index c911c6ec4dd6..4e461438e48b 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -410,6 +410,12 @@ int request_irq(unsigned int irq, struct irqaction *action; int retval; +#ifdef CONFIG_LOCKDEP + /* + * Lockdep wants atomic interrupt handlers: + */ + irqflags |= SA_INTERRUPT; +#endif /* * Sanity-check: shared interrupts must pass in a real dev-ID, * otherwise we'll have trouble later trying to figure out diff --git a/kernel/lockdep.c b/kernel/lockdep.c new file mode 100644 index 000000000000..dd0580910a97 --- /dev/null +++ b/kernel/lockdep.c @@ -0,0 +1,2703 @@ +/* + * kernel/lockdep.c + * + * Runtime locking correctness validator + * + * Started by Ingo Molnar: + * + * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar + * + * this code maps all the lock dependencies as they occur in a live kernel + * and will warn about the following classes of locking bugs: + * + * - lock inversion scenarios + * - circular lock dependencies + * - hardirq/softirq safe/unsafe locking 
bugs + * + * Bugs are reported even if the current locking scenario does not cause + * any deadlock at this point. + * + * I.e. if anytime in the past two locks were taken in a different order, + * even if it happened for another task, even if those were different + * locks (but of the same class as this lock), this code will detect it. + * + * Thanks to Arjan van de Ven for coming up with the initial idea of + * mapping lock dependencies runtime. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "lockdep_internals.h" + +/* + * hash_lock: protects the lockdep hashes and class/list/hash allocators. + * + * This is one of the rare exceptions where it's justified + * to use a raw spinlock - we really dont want the spinlock + * code to recurse back into the lockdep code. + */ +static raw_spinlock_t hash_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + +static int lockdep_initialized; + +unsigned long nr_list_entries; +static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; + +/* + * Allocate a lockdep entry. (assumes hash_lock held, returns + * with NULL on failure) + */ +static struct lock_list *alloc_list_entry(void) +{ + if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) { + __raw_spin_unlock(&hash_lock); + debug_locks_off(); + printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n"); + printk("turning off the locking correctness validator.\n"); + return NULL; + } + return list_entries + nr_list_entries++; +} + +/* + * All data structures here are protected by the global debug_lock. + * + * Mutex key structs only get allocated, once during bootup, and never + * get freed - this significantly simplifies the debugging code. + */ +unsigned long nr_lock_classes; +static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; + +/* + * We keep a global list of all lock classes. The list only grows, + * never shrinks. The list is only accessed with the lockdep + * spinlock lock held. + */ +LIST_HEAD(all_lock_classes); + +/* + * The lockdep classes are in a hash-table as well, for fast lookup: + */ +#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1) +#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS) +#define CLASSHASH_MASK (CLASSHASH_SIZE - 1) +#define __classhashfn(key) ((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK) +#define classhashentry(key) (classhash_table + __classhashfn((key))) + +static struct list_head classhash_table[CLASSHASH_SIZE]; + +unsigned long nr_lock_chains; +static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS]; + +/* + * We put the lock dependency chains into a hash-table as well, to cache + * their existence: + */ +#define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1) +#define CHAINHASH_SIZE (1UL << CHAINHASH_BITS) +#define CHAINHASH_MASK (CHAINHASH_SIZE - 1) +#define __chainhashfn(chain) \ + (((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK) +#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain))) + +static struct list_head chainhash_table[CHAINHASH_SIZE]; + +/* + * The hash key of the lock dependency chains is a hash itself too: + * it's a hash of all locks taken up to that lock, including that lock. + * It's a 64-bit hash, because it's important for the keys to be + * unique. 
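The chain key described above is built by repeatedly folding each held lock's class id into a running 64-bit value with the iterate_chain_key() macro defined just below. A rough userspace sketch of that folding scheme (KEYS_BITS stands in for MAX_LOCKDEP_KEYS_BITS, whose real value lives in lockdep_internals.h; class ids are shown as small integers):

	#include <stdio.h>
	#include <stdint.h>

	/* Stand-in for MAX_LOCKDEP_KEYS_BITS; the real value may differ. */
	#define KEYS_BITS 13

	/* Same shape as iterate_chain_key(): rotate-ish mix of the running
	 * key, then fold in the next class id. */
	static uint64_t fold_key(uint64_t key, uint64_t id)
	{
		return (key << (KEYS_BITS / 2)) ^
		       (key >> (64 - KEYS_BITS / 2)) ^
		       id;
	}

	int main(void)
	{
		uint64_t abc[] = { 1, 2, 3 };	/* class ids, taken in this order */
		uint64_t acb[] = { 1, 3, 2 };	/* same classes, different order */
		uint64_t k1 = 0, k2 = 0;	/* chain keys start from 0 */
		int i;

		for (i = 0; i < 3; i++) {
			k1 = fold_key(k1, abc[i]);
			k2 = fold_key(k2, acb[i]);
		}
		/* Different orderings normally yield different chain keys,
		 * which is what lets the chain cache tell the sequences apart: */
		printf("%016llx vs %016llx\n",
		       (unsigned long long)k1, (unsigned long long)k2);
		return 0;
	}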
+ */ +#define iterate_chain_key(key1, key2) \ + (((key1) << MAX_LOCKDEP_KEYS_BITS/2) ^ \ + ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS/2)) ^ \ + (key2)) + +void lockdep_off(void) +{ + current->lockdep_recursion++; +} + +EXPORT_SYMBOL(lockdep_off); + +void lockdep_on(void) +{ + current->lockdep_recursion--; +} + +EXPORT_SYMBOL(lockdep_on); + +int lockdep_internal(void) +{ + return current->lockdep_recursion != 0; +} + +EXPORT_SYMBOL(lockdep_internal); + +/* + * Debugging switches: + */ + +#define VERBOSE 0 +#ifdef VERBOSE +# define VERY_VERBOSE 0 +#endif + +#if VERBOSE +# define HARDIRQ_VERBOSE 1 +# define SOFTIRQ_VERBOSE 1 +#else +# define HARDIRQ_VERBOSE 0 +# define SOFTIRQ_VERBOSE 0 +#endif + +#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE +/* + * Quick filtering for interesting events: + */ +static int class_filter(struct lock_class *class) +{ + if (class->name_version == 1 && + !strcmp(class->name, "&rl->lock")) + return 1; + if (class->name_version == 1 && + !strcmp(class->name, "&ni->mrec_lock")) + return 1; + if (class->name_version == 1 && + !strcmp(class->name, "mft_ni_runlist_lock")) + return 1; + if (class->name_version == 1 && + !strcmp(class->name, "mft_ni_mrec_lock")) + return 1; + if (class->name_version == 1 && + !strcmp(class->name, "&vol->lcnbmp_lock")) + return 1; + return 0; +} +#endif + +static int verbose(struct lock_class *class) +{ +#if VERBOSE + return class_filter(class); +#endif + return 0; +} + +#ifdef CONFIG_TRACE_IRQFLAGS + +static int hardirq_verbose(struct lock_class *class) +{ +#if HARDIRQ_VERBOSE + return class_filter(class); +#endif + return 0; +} + +static int softirq_verbose(struct lock_class *class) +{ +#if SOFTIRQ_VERBOSE + return class_filter(class); +#endif + return 0; +} + +#endif + +/* + * Stack-trace: tightly packed array of stack backtrace + * addresses. Protected by the hash_lock. + */ +unsigned long nr_stack_trace_entries; +static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES]; + +static int save_trace(struct stack_trace *trace) +{ + trace->nr_entries = 0; + trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries; + trace->entries = stack_trace + nr_stack_trace_entries; + + save_stack_trace(trace, NULL, 0, 3); + + trace->max_entries = trace->nr_entries; + + nr_stack_trace_entries += trace->nr_entries; + if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES)) + return 0; + + if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) { + __raw_spin_unlock(&hash_lock); + if (debug_locks_off()) { + printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n"); + printk("turning off the locking correctness validator.\n"); + dump_stack(); + } + return 0; + } + + return 1; +} + +unsigned int nr_hardirq_chains; +unsigned int nr_softirq_chains; +unsigned int nr_process_chains; +unsigned int max_lockdep_depth; +unsigned int max_recursion_depth; + +#ifdef CONFIG_DEBUG_LOCKDEP +/* + * We cannot printk in early bootup code. Not even early_printk() + * might work. So we mark any initialization errors and printk + * about it later on, in lockdep_info(). 
+ */ +static int lockdep_init_error; + +/* + * Various lockdep statistics: + */ +atomic_t chain_lookup_hits; +atomic_t chain_lookup_misses; +atomic_t hardirqs_on_events; +atomic_t hardirqs_off_events; +atomic_t redundant_hardirqs_on; +atomic_t redundant_hardirqs_off; +atomic_t softirqs_on_events; +atomic_t softirqs_off_events; +atomic_t redundant_softirqs_on; +atomic_t redundant_softirqs_off; +atomic_t nr_unused_locks; +atomic_t nr_cyclic_checks; +atomic_t nr_cyclic_check_recursions; +atomic_t nr_find_usage_forwards_checks; +atomic_t nr_find_usage_forwards_recursions; +atomic_t nr_find_usage_backwards_checks; +atomic_t nr_find_usage_backwards_recursions; +# define debug_atomic_inc(ptr) atomic_inc(ptr) +# define debug_atomic_dec(ptr) atomic_dec(ptr) +# define debug_atomic_read(ptr) atomic_read(ptr) +#else +# define debug_atomic_inc(ptr) do { } while (0) +# define debug_atomic_dec(ptr) do { } while (0) +# define debug_atomic_read(ptr) 0 +#endif + +/* + * Locking printouts: + */ + +static const char *usage_str[] = +{ + [LOCK_USED] = "initial-use ", + [LOCK_USED_IN_HARDIRQ] = "in-hardirq-W", + [LOCK_USED_IN_SOFTIRQ] = "in-softirq-W", + [LOCK_ENABLED_SOFTIRQS] = "softirq-on-W", + [LOCK_ENABLED_HARDIRQS] = "hardirq-on-W", + [LOCK_USED_IN_HARDIRQ_READ] = "in-hardirq-R", + [LOCK_USED_IN_SOFTIRQ_READ] = "in-softirq-R", + [LOCK_ENABLED_SOFTIRQS_READ] = "softirq-on-R", + [LOCK_ENABLED_HARDIRQS_READ] = "hardirq-on-R", +}; + +const char * __get_key_name(struct lockdep_subclass_key *key, char *str) +{ + unsigned long offs, size; + char *modname; + + return kallsyms_lookup((unsigned long)key, &size, &offs, &modname, str); +} + +void +get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4) +{ + *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.'; + + if (class->usage_mask & LOCKF_USED_IN_HARDIRQ) + *c1 = '+'; + else + if (class->usage_mask & LOCKF_ENABLED_HARDIRQS) + *c1 = '-'; + + if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ) + *c2 = '+'; + else + if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS) + *c2 = '-'; + + if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) + *c3 = '-'; + if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) { + *c3 = '+'; + if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) + *c3 = '?'; + } + + if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ) + *c4 = '-'; + if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) { + *c4 = '+'; + if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ) + *c4 = '?'; + } +} + +static void print_lock_name(struct lock_class *class) +{ + char str[128], c1, c2, c3, c4; + const char *name; + + get_usage_chars(class, &c1, &c2, &c3, &c4); + + name = class->name; + if (!name) { + name = __get_key_name(class->key, str); + printk(" (%s", name); + } else { + printk(" (%s", name); + if (class->name_version > 1) + printk("#%d", class->name_version); + if (class->subclass) + printk("/%d", class->subclass); + } + printk("){%c%c%c%c}", c1, c2, c3, c4); +} + +static void print_lockdep_cache(struct lockdep_map *lock) +{ + const char *name; + char str[128]; + + name = lock->name; + if (!name) + name = __get_key_name(lock->key->subkeys, str); + + printk("%s", name); +} + +static void print_lock(struct held_lock *hlock) +{ + print_lock_name(hlock->class); + printk(", at: "); + print_ip_sym(hlock->acquire_ip); +} + +static void lockdep_print_held_locks(struct task_struct *curr) +{ + int i, depth = curr->lockdep_depth; + + if (!depth) { + printk("no locks held by %s/%d.\n", curr->comm, curr->pid); + return; + } + printk("%d lock%s held by %s/%d:\n", + depth, 
depth > 1 ? "s" : "", curr->comm, curr->pid); + + for (i = 0; i < depth; i++) { + printk(" #%d: ", i); + print_lock(curr->held_locks + i); + } +} +/* + * Helper to print a nice hierarchy of lock dependencies: + */ +static void print_spaces(int nr) +{ + int i; + + for (i = 0; i < nr; i++) + printk(" "); +} + +static void print_lock_class_header(struct lock_class *class, int depth) +{ + int bit; + + print_spaces(depth); + printk("->"); + print_lock_name(class); + printk(" ops: %lu", class->ops); + printk(" {\n"); + + for (bit = 0; bit < LOCK_USAGE_STATES; bit++) { + if (class->usage_mask & (1 << bit)) { + int len = depth; + + print_spaces(depth); + len += printk(" %s", usage_str[bit]); + len += printk(" at:\n"); + print_stack_trace(class->usage_traces + bit, len); + } + } + print_spaces(depth); + printk(" }\n"); + + print_spaces(depth); + printk(" ... key at: "); + print_ip_sym((unsigned long)class->key); +} + +/* + * printk all lock dependencies starting at : + */ +static void print_lock_dependencies(struct lock_class *class, int depth) +{ + struct lock_list *entry; + + if (DEBUG_LOCKS_WARN_ON(depth >= 20)) + return; + + print_lock_class_header(class, depth); + + list_for_each_entry(entry, &class->locks_after, entry) { + DEBUG_LOCKS_WARN_ON(!entry->class); + print_lock_dependencies(entry->class, depth + 1); + + print_spaces(depth); + printk(" ... acquired at:\n"); + print_stack_trace(&entry->trace, 2); + printk("\n"); + } +} + +/* + * Add a new dependency to the head of the list: + */ +static int add_lock_to_list(struct lock_class *class, struct lock_class *this, + struct list_head *head, unsigned long ip) +{ + struct lock_list *entry; + /* + * Lock not present yet - get a new dependency struct and + * add it to the list: + */ + entry = alloc_list_entry(); + if (!entry) + return 0; + + entry->class = this; + save_trace(&entry->trace); + + /* + * Since we never remove from the dependency list, the list can + * be walked lockless by other CPUs, it's only allocation + * that must be protected by the spinlock. But this also means + * we must make new entries visible only once writes to the + * entry become visible - hence the RCU op: + */ + list_add_tail_rcu(&entry->entry, head); + + return 1; +} + +/* + * Recursive, forwards-direction lock-dependency checking, used for + * both noncyclic checking and for hardirq-unsafe/softirq-unsafe + * checking. + * + * (to keep the stackframe of the recursive functions small we + * use these global variables, and we also mark various helper + * functions as noinline.) 
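The recursion described here is a plain depth-first walk over each class's forwards-dependency list. A minimal userspace sketch of the same check, with array indices standing in for lock classes and the same recursion depth cap as the patch:

	#include <stdio.h>

	#define NCLASS 3

	/* Toy forwards-dependency lists: after[i] holds the classes taken
	 * while class i was held, terminated by -1. */
	static const int after[NCLASS][NCLASS + 1] = {
		{ 1, -1 },	/* class 0 -> class 1 */
		{ 2, -1 },	/* class 1 -> class 2 */
		{ -1 },		/* class 2: no forward dependencies yet */
	};

	/* Kept global, like check_target in the patch, to keep the
	 * recursive stack frames small. */
	static int target;

	/* Same shape as check_noncircular(): walk forwards from <source>
	 * and report whether <target> is reachable. */
	static int reaches_target(int source, unsigned int depth)
	{
		int i;

		if (depth >= 20)
			return 0;
		for (i = 0; after[source][i] >= 0; i++) {
			if (after[source][i] == target)
				return 1;
			if (reaches_target(after[source][i], depth + 1))
				return 1;
		}
		return 0;
	}

	int main(void)
	{
		/* Would adding a 2 -> 0 dependency be circular? Yes, because
		 * 0 already reaches 2 via 0 -> 1 -> 2: */
		target = 2;
		printf("2 -> 0 would close a cycle: %s\n",
		       reaches_target(0, 0) ? "yes" : "no");
		return 0;
	}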
+ */ +static struct held_lock *check_source, *check_target; + +/* + * Print a dependency chain entry (this is only done when a deadlock + * has been detected): + */ +static noinline int +print_circular_bug_entry(struct lock_list *target, unsigned int depth) +{ + if (debug_locks_silent) + return 0; + printk("\n-> #%u", depth); + print_lock_name(target->class); + printk(":\n"); + print_stack_trace(&target->trace, 6); + + return 0; +} + +/* + * When a circular dependency is detected, print the + * header first: + */ +static noinline int +print_circular_bug_header(struct lock_list *entry, unsigned int depth) +{ + struct task_struct *curr = current; + + __raw_spin_unlock(&hash_lock); + debug_locks_off(); + if (debug_locks_silent) + return 0; + + printk("\n=======================================================\n"); + printk( "[ INFO: possible circular locking dependency detected ]\n"); + printk( "-------------------------------------------------------\n"); + printk("%s/%d is trying to acquire lock:\n", + curr->comm, curr->pid); + print_lock(check_source); + printk("\nbut task is already holding lock:\n"); + print_lock(check_target); + printk("\nwhich lock already depends on the new lock.\n\n"); + printk("\nthe existing dependency chain (in reverse order) is:\n"); + + print_circular_bug_entry(entry, depth); + + return 0; +} + +static noinline int print_circular_bug_tail(void) +{ + struct task_struct *curr = current; + struct lock_list this; + + if (debug_locks_silent) + return 0; + + this.class = check_source->class; + save_trace(&this.trace); + print_circular_bug_entry(&this, 0); + + printk("\nother info that might help us debug this:\n\n"); + lockdep_print_held_locks(curr); + + printk("\nstack backtrace:\n"); + dump_stack(); + + return 0; +} + +static int noinline print_infinite_recursion_bug(void) +{ + __raw_spin_unlock(&hash_lock); + DEBUG_LOCKS_WARN_ON(1); + + return 0; +} + +/* + * Prove that the dependency graph starting at can not + * lead to . Print an error and return 0 if it does. + */ +static noinline int +check_noncircular(struct lock_class *source, unsigned int depth) +{ + struct lock_list *entry; + + debug_atomic_inc(&nr_cyclic_check_recursions); + if (depth > max_recursion_depth) + max_recursion_depth = depth; + if (depth >= 20) + return print_infinite_recursion_bug(); + /* + * Check this lock's dependency list: + */ + list_for_each_entry(entry, &source->locks_after, entry) { + if (entry->class == check_target->class) + return print_circular_bug_header(entry, depth+1); + debug_atomic_inc(&nr_cyclic_checks); + if (!check_noncircular(entry->class, depth+1)) + return print_circular_bug_entry(entry, depth+1); + } + return 1; +} + +static int very_verbose(struct lock_class *class) +{ +#if VERY_VERBOSE + return class_filter(class); +#endif + return 0; +} +#ifdef CONFIG_TRACE_IRQFLAGS + +/* + * Forwards and backwards subgraph searching, for the purposes of + * proving that two subgraphs can be connected by a new dependency + * without creating any illegal irq-safe -> irq-unsafe lock dependency. + */ +static enum lock_usage_bit find_usage_bit; +static struct lock_class *forwards_match, *backwards_match; + +/* + * Find a node in the forwards-direction dependency sub-graph starting + * at that matches . + * + * Return 2 if such a node exists in the subgraph, and put that node + * into . + * + * Return 1 otherwise and keep unchanged. + * Return 0 on error. 
+ */ +static noinline int +find_usage_forwards(struct lock_class *source, unsigned int depth) +{ + struct lock_list *entry; + int ret; + + if (depth > max_recursion_depth) + max_recursion_depth = depth; + if (depth >= 20) + return print_infinite_recursion_bug(); + + debug_atomic_inc(&nr_find_usage_forwards_checks); + if (source->usage_mask & (1 << find_usage_bit)) { + forwards_match = source; + return 2; + } + + /* + * Check this lock's dependency list: + */ + list_for_each_entry(entry, &source->locks_after, entry) { + debug_atomic_inc(&nr_find_usage_forwards_recursions); + ret = find_usage_forwards(entry->class, depth+1); + if (ret == 2 || ret == 0) + return ret; + } + return 1; +} + +/* + * Find a node in the backwards-direction dependency sub-graph starting + * at that matches . + * + * Return 2 if such a node exists in the subgraph, and put that node + * into . + * + * Return 1 otherwise and keep unchanged. + * Return 0 on error. + */ +static noinline int +find_usage_backwards(struct lock_class *source, unsigned int depth) +{ + struct lock_list *entry; + int ret; + + if (depth > max_recursion_depth) + max_recursion_depth = depth; + if (depth >= 20) + return print_infinite_recursion_bug(); + + debug_atomic_inc(&nr_find_usage_backwards_checks); + if (source->usage_mask & (1 << find_usage_bit)) { + backwards_match = source; + return 2; + } + + /* + * Check this lock's dependency list: + */ + list_for_each_entry(entry, &source->locks_before, entry) { + debug_atomic_inc(&nr_find_usage_backwards_recursions); + ret = find_usage_backwards(entry->class, depth+1); + if (ret == 2 || ret == 0) + return ret; + } + return 1; +} + +static int +print_bad_irq_dependency(struct task_struct *curr, + struct held_lock *prev, + struct held_lock *next, + enum lock_usage_bit bit1, + enum lock_usage_bit bit2, + const char *irqclass) +{ + __raw_spin_unlock(&hash_lock); + debug_locks_off(); + if (debug_locks_silent) + return 0; + + printk("\n======================================================\n"); + printk( "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n", + irqclass, irqclass); + printk( "------------------------------------------------------\n"); + printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", + curr->comm, curr->pid, + curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, + curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, + curr->hardirqs_enabled, + curr->softirqs_enabled); + print_lock(next); + + printk("\nand this task is already holding:\n"); + print_lock(prev); + printk("which would create a new lock dependency:\n"); + print_lock_name(prev->class); + printk(" ->"); + print_lock_name(next->class); + printk("\n"); + + printk("\nbut this new dependency connects a %s-irq-safe lock:\n", + irqclass); + print_lock_name(backwards_match); + printk("\n... which became %s-irq-safe at:\n", irqclass); + + print_stack_trace(backwards_match->usage_traces + bit1, 1); + + printk("\nto a %s-irq-unsafe lock:\n", irqclass); + print_lock_name(forwards_match); + printk("\n... 
which became %s-irq-unsafe at:\n", irqclass); + printk("..."); + + print_stack_trace(forwards_match->usage_traces + bit2, 1); + + printk("\nother info that might help us debug this:\n\n"); + lockdep_print_held_locks(curr); + + printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass); + print_lock_dependencies(backwards_match, 0); + + printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass); + print_lock_dependencies(forwards_match, 0); + + printk("\nstack backtrace:\n"); + dump_stack(); + + return 0; +} + +static int +check_usage(struct task_struct *curr, struct held_lock *prev, + struct held_lock *next, enum lock_usage_bit bit_backwards, + enum lock_usage_bit bit_forwards, const char *irqclass) +{ + int ret; + + find_usage_bit = bit_backwards; + /* fills in */ + ret = find_usage_backwards(prev->class, 0); + if (!ret || ret == 1) + return ret; + + find_usage_bit = bit_forwards; + ret = find_usage_forwards(next->class, 0); + if (!ret || ret == 1) + return ret; + /* ret == 2 */ + return print_bad_irq_dependency(curr, prev, next, + bit_backwards, bit_forwards, irqclass); +} + +#endif + +static int +print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, + struct held_lock *next) +{ + debug_locks_off(); + __raw_spin_unlock(&hash_lock); + if (debug_locks_silent) + return 0; + + printk("\n=============================================\n"); + printk( "[ INFO: possible recursive locking detected ]\n"); + printk( "---------------------------------------------\n"); + printk("%s/%d is trying to acquire lock:\n", + curr->comm, curr->pid); + print_lock(next); + printk("\nbut task is already holding lock:\n"); + print_lock(prev); + + printk("\nother info that might help us debug this:\n"); + lockdep_print_held_locks(curr); + + printk("\nstack backtrace:\n"); + dump_stack(); + + return 0; +} + +/* + * Check whether we are holding such a class already. + * + * (Note that this has to be done separately, because the graph cannot + * detect such classes of deadlocks.) + * + * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read + */ +static int +check_deadlock(struct task_struct *curr, struct held_lock *next, + struct lockdep_map *next_instance, int read) +{ + struct held_lock *prev; + int i; + + for (i = 0; i < curr->lockdep_depth; i++) { + prev = curr->held_locks + i; + if (prev->class != next->class) + continue; + /* + * Allow read-after-read recursion of the same + * lock instance (i.e. read_lock(lock)+read_lock(lock)): + */ + if ((read == 2) && prev->read && + (prev->instance == next_instance)) + return 2; + return print_deadlock_bug(curr, prev, next); + } + return 1; +} + +/* + * There was a chain-cache miss, and we are about to add a new dependency + * to a previous lock. We recursively validate the following rules: + * + * - would the adding of the -> dependency create a + * circular dependency in the graph? [== circular deadlock] + * + * - does the new prev->next dependency connect any hardirq-safe lock + * (in the full backwards-subgraph starting at ) with any + * hardirq-unsafe lock (in the full forwards-subgraph starting at + * )? [== illegal lock inversion with hardirq contexts] + * + * - does the new prev->next dependency connect any softirq-safe lock + * (in the full backwards-subgraph starting at ) with any + * softirq-unsafe lock (in the full forwards-subgraph starting at + * )? [== illegal lock inversion with softirq contexts] + * + * any of these scenarios could lead to a deadlock. 
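For concreteness, the first rule is what catches the classic AB-BA ordering bug. An illustrative userspace version of that pattern (pthread mutexes standing in for kernel locks, thread1/thread2 purely hypothetical; build with -lpthread):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t A = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t B = PTHREAD_MUTEX_INITIALIZER;

	static void *thread1(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&A);		/* records the A -> B dependency */
		pthread_mutex_lock(&B);
		pthread_mutex_unlock(&B);
		pthread_mutex_unlock(&A);
		return NULL;
	}

	static void *thread2(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&B);		/* records B -> A: closes the cycle */
		pthread_mutex_lock(&A);
		pthread_mutex_unlock(&A);
		pthread_mutex_unlock(&B);
		return NULL;
	}

	int main(void)
	{
		pthread_t t1, t2;

		/* Usually completes, but can deadlock; a dependency-graph
		 * checker flags it even on runs where the deadlock never
		 * actually triggers. */
		pthread_create(&t1, NULL, thread1, NULL);
		pthread_create(&t2, NULL, thread2, NULL);
		pthread_join(t1, NULL);
		pthread_join(t2, NULL);
		puts("done");
		return 0;
	}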
+ * + * Then if all the validations pass, we add the forwards and backwards + * dependency. + */ +static int +check_prev_add(struct task_struct *curr, struct held_lock *prev, + struct held_lock *next) +{ + struct lock_list *entry; + int ret; + + /* + * Prove that the new -> dependency would not + * create a circular dependency in the graph. (We do this by + * forward-recursing into the graph starting at , and + * checking whether we can reach .) + * + * We are using global variables to control the recursion, to + * keep the stackframe size of the recursive functions low: + */ + check_source = next; + check_target = prev; + if (!(check_noncircular(next->class, 0))) + return print_circular_bug_tail(); + +#ifdef CONFIG_TRACE_IRQFLAGS + /* + * Prove that the new dependency does not connect a hardirq-safe + * lock with a hardirq-unsafe lock - to achieve this we search + * the backwards-subgraph starting at , and the + * forwards-subgraph starting at : + */ + if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ, + LOCK_ENABLED_HARDIRQS, "hard")) + return 0; + + /* + * Prove that the new dependency does not connect a hardirq-safe-read + * lock with a hardirq-unsafe lock - to achieve this we search + * the backwards-subgraph starting at , and the + * forwards-subgraph starting at : + */ + if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ, + LOCK_ENABLED_HARDIRQS, "hard-read")) + return 0; + + /* + * Prove that the new dependency does not connect a softirq-safe + * lock with a softirq-unsafe lock - to achieve this we search + * the backwards-subgraph starting at , and the + * forwards-subgraph starting at : + */ + if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ, + LOCK_ENABLED_SOFTIRQS, "soft")) + return 0; + /* + * Prove that the new dependency does not connect a softirq-safe-read + * lock with a softirq-unsafe lock - to achieve this we search + * the backwards-subgraph starting at , and the + * forwards-subgraph starting at : + */ + if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ, + LOCK_ENABLED_SOFTIRQS, "soft")) + return 0; +#endif + /* + * For recursive read-locks we do all the dependency checks, + * but we dont store read-triggered dependencies (only + * write-triggered dependencies). This ensures that only the + * write-side dependencies matter, and that if for example a + * write-lock never takes any other locks, then the reads are + * equivalent to a NOP. + */ + if (next->read == 2 || prev->read == 2) + return 1; + /* + * Is the -> dependency already present? + * + * (this may occur even though this is a new chain: consider + * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3 + * chains - the second one will be new, but L1 already has + * L2 added to its dependency list, due to the first chain.) + */ + list_for_each_entry(entry, &prev->class->locks_after, entry) { + if (entry->class == next->class) + return 2; + } + + /* + * Ok, all validations passed, add the new lock + * to the previous lock's dependency list: + */ + ret = add_lock_to_list(prev->class, next->class, + &prev->class->locks_after, next->acquire_ip); + if (!ret) + return 0; + /* + * Return value of 2 signals 'dependency already added', + * in that case we dont have to add the backlink either. 
+ */ + if (ret == 2) + return 2; + ret = add_lock_to_list(next->class, prev->class, + &next->class->locks_before, next->acquire_ip); + + /* + * Debugging printouts: + */ + if (verbose(prev->class) || verbose(next->class)) { + __raw_spin_unlock(&hash_lock); + printk("\n new dependency: "); + print_lock_name(prev->class); + printk(" => "); + print_lock_name(next->class); + printk("\n"); + dump_stack(); + __raw_spin_lock(&hash_lock); + } + return 1; +} + +/* + * Add the dependency to all directly-previous locks that are 'relevant'. + * The ones that are relevant are (in increasing distance from curr): + * all consecutive trylock entries and the final non-trylock entry - or + * the end of this context's lock-chain - whichever comes first. + */ +static int +check_prevs_add(struct task_struct *curr, struct held_lock *next) +{ + int depth = curr->lockdep_depth; + struct held_lock *hlock; + + /* + * Debugging checks. + * + * Depth must not be zero for a non-head lock: + */ + if (!depth) + goto out_bug; + /* + * At least two relevant locks must exist for this + * to be a head: + */ + if (curr->held_locks[depth].irq_context != + curr->held_locks[depth-1].irq_context) + goto out_bug; + + for (;;) { + hlock = curr->held_locks + depth-1; + /* + * Only non-recursive-read entries get new dependencies + * added: + */ + if (hlock->read != 2) { + check_prev_add(curr, hlock, next); + /* + * Stop after the first non-trylock entry, + * as non-trylock entries have added their + * own direct dependencies already, so this + * lock is connected to them indirectly: + */ + if (!hlock->trylock) + break; + } + depth--; + /* + * End of lock-stack? + */ + if (!depth) + break; + /* + * Stop the search if we cross into another context: + */ + if (curr->held_locks[depth].irq_context != + curr->held_locks[depth-1].irq_context) + break; + } + return 1; +out_bug: + __raw_spin_unlock(&hash_lock); + DEBUG_LOCKS_WARN_ON(1); + + return 0; +} + + +/* + * Is this the address of a static object: + */ +static int static_obj(void *obj) +{ + unsigned long start = (unsigned long) &_stext, + end = (unsigned long) &_end, + addr = (unsigned long) obj; +#ifdef CONFIG_SMP + int i; +#endif + + /* + * static variable? + */ + if ((addr >= start) && (addr < end)) + return 1; + +#ifdef CONFIG_SMP + /* + * percpu var? + */ + for_each_possible_cpu(i) { + start = (unsigned long) &__per_cpu_start + per_cpu_offset(i); + end = (unsigned long) &__per_cpu_end + per_cpu_offset(i); + + if ((addr >= start) && (addr < end)) + return 1; + } +#endif + + /* + * module var? + */ + return is_module_address(addr); +} + +/* + * To make lock name printouts unique, we calculate a unique + * class->name_version generation counter: + */ +static int count_matching_names(struct lock_class *new_class) +{ + struct lock_class *class; + int count = 0; + + if (!new_class->name) + return 0; + + list_for_each_entry(class, &all_lock_classes, lock_entry) { + if (new_class->key - new_class->subclass == class->key) + return class->name_version; + if (class->name && !strcmp(class->name, new_class->name)) + count = max(count, class->name_version); + } + + return count + 1; +} + +extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void); + +/* + * Register a lock's class in the hash-table, if the class is not present + * yet. Otherwise we look it up. We cache the result in the lock object + * itself, so actual lookup of the hash should be once per lock object. 
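For dynamically initialized locks, the per-class key that the function below hashes on is simply the address of a static variable declared at the initialization site. A schematic of how such a key can reach lockdep_init_map(), given the declarations from the new <linux/lockdep.h> (my_lock_init(), dep_map and do_raw_lock_init() are hypothetical names, not the real spin_lock_init()/mutex_init() glue):

	/* Every expansion site gets its own "static struct lock_class_key",
	 * i.e. its own persistent address, and that address becomes the
	 * class key: */
	#define my_lock_init(lock)					\
	do {								\
		static struct lock_class_key __key;			\
									\
		lockdep_init_map(&(lock)->dep_map, #lock, &__key);	\
		do_raw_lock_init(lock);		/* hypothetical arch setup */ \
	} while (0)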
+ */ +static inline struct lock_class * +register_lock_class(struct lockdep_map *lock, unsigned int subclass) +{ + struct lockdep_subclass_key *key; + struct list_head *hash_head; + struct lock_class *class; + +#ifdef CONFIG_DEBUG_LOCKDEP + /* + * If the architecture calls into lockdep before initializing + * the hashes then we'll warn about it later. (we cannot printk + * right now) + */ + if (unlikely(!lockdep_initialized)) { + lockdep_init(); + lockdep_init_error = 1; + } +#endif + + /* + * Static locks do not have their class-keys yet - for them the key + * is the lock object itself: + */ + if (unlikely(!lock->key)) + lock->key = (void *)lock; + + /* + * NOTE: the class-key must be unique. For dynamic locks, a static + * lock_class_key variable is passed in through the mutex_init() + * (or spin_lock_init()) call - which acts as the key. For static + * locks we use the lock object itself as the key. + */ + if (sizeof(struct lock_class_key) > sizeof(struct lock_class)) + __error_too_big_MAX_LOCKDEP_SUBCLASSES(); + + key = lock->key->subkeys + subclass; + + hash_head = classhashentry(key); + + /* + * We can walk the hash lockfree, because the hash only + * grows, and we are careful when adding entries to the end: + */ + list_for_each_entry(class, hash_head, hash_entry) + if (class->key == key) + goto out_set; + + /* + * Debug-check: all keys must be persistent! + */ + if (!static_obj(lock->key)) { + debug_locks_off(); + printk("INFO: trying to register non-static key.\n"); + printk("the code is fine but needs lockdep annotation.\n"); + printk("turning off the locking correctness validator.\n"); + dump_stack(); + + return NULL; + } + + __raw_spin_lock(&hash_lock); + /* + * We have to do the hash-walk again, to avoid races + * with another CPU: + */ + list_for_each_entry(class, hash_head, hash_entry) + if (class->key == key) + goto out_unlock_set; + /* + * Allocate a new key from the static array, and add it to + * the hash: + */ + if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { + __raw_spin_unlock(&hash_lock); + debug_locks_off(); + printk("BUG: MAX_LOCKDEP_KEYS too low!\n"); + printk("turning off the locking correctness validator.\n"); + return NULL; + } + class = lock_classes + nr_lock_classes++; + debug_atomic_inc(&nr_unused_locks); + class->key = key; + class->name = lock->name; + class->subclass = subclass; + INIT_LIST_HEAD(&class->lock_entry); + INIT_LIST_HEAD(&class->locks_before); + INIT_LIST_HEAD(&class->locks_after); + class->name_version = count_matching_names(class); + /* + * We use RCU's safe list-add method to make + * parallel walking of the hash-list safe: + */ + list_add_tail_rcu(&class->hash_entry, hash_head); + + if (verbose(class)) { + __raw_spin_unlock(&hash_lock); + printk("\nnew class %p: %s", class->key, class->name); + if (class->name_version > 1) + printk("#%d", class->name_version); + printk("\n"); + dump_stack(); + __raw_spin_lock(&hash_lock); + } +out_unlock_set: + __raw_spin_unlock(&hash_lock); + +out_set: + lock->class[subclass] = class; + + DEBUG_LOCKS_WARN_ON(class->subclass != subclass); + + return class; +} + +/* + * Look up a dependency chain. If the key is not present yet then + * add it and return 0 - in this case the new dependency chain is + * validated. If the key is already hashed, return 1. 
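In effect the chain key memoizes whole lock sequences: only the first time a given key is seen do the expensive graph checks run. A minimal userspace sketch of that lookup-or-add step (hash shape simplified, and without the MAX_LOCKDEP_CHAINS overflow handling the patch performs):

	#include <stdio.h>
	#include <stdint.h>

	#define NBUCKETS 64	/* stand-in for CHAINHASH_SIZE */
	#define MAXCHAIN 128

	struct chain { uint64_t key; int next; };

	static struct chain chains[MAXCHAIN];
	static int buckets[NBUCKETS];
	static int nr_chains;

	/* Returns 1 the first time a key is seen (full validation needed),
	 * 0 on later lookups of the same key (cache hit). No overflow
	 * check, unlike the patch. */
	static int seen_first_time(uint64_t key)
	{
		int b = (int)((key ^ (key >> 6)) & (NBUCKETS - 1));
		int i;

		for (i = buckets[b]; i; i = chains[i].next)
			if (chains[i].key == key)
				return 0;

		i = ++nr_chains;	/* slot 0 is the list terminator */
		chains[i].key = key;
		chains[i].next = buckets[b];
		buckets[b] = i;
		return 1;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       seen_first_time(0x1234), seen_first_time(0x5678),
		       seen_first_time(0x1234));	/* prints: 1 1 0 */
		return 0;
	}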
+ */ +static inline int lookup_chain_cache(u64 chain_key) +{ + struct list_head *hash_head = chainhashentry(chain_key); + struct lock_chain *chain; + + DEBUG_LOCKS_WARN_ON(!irqs_disabled()); + /* + * We can walk it lock-free, because entries only get added + * to the hash: + */ + list_for_each_entry(chain, hash_head, entry) { + if (chain->chain_key == chain_key) { +cache_hit: + debug_atomic_inc(&chain_lookup_hits); + /* + * In the debugging case, force redundant checking + * by returning 1: + */ +#ifdef CONFIG_DEBUG_LOCKDEP + __raw_spin_lock(&hash_lock); + return 1; +#endif + return 0; + } + } + /* + * Allocate a new chain entry from the static array, and add + * it to the hash: + */ + __raw_spin_lock(&hash_lock); + /* + * We have to walk the chain again locked - to avoid duplicates: + */ + list_for_each_entry(chain, hash_head, entry) { + if (chain->chain_key == chain_key) { + __raw_spin_unlock(&hash_lock); + goto cache_hit; + } + } + if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { + __raw_spin_unlock(&hash_lock); + debug_locks_off(); + printk("BUG: MAX_LOCKDEP_CHAINS too low!\n"); + printk("turning off the locking correctness validator.\n"); + return 0; + } + chain = lock_chains + nr_lock_chains++; + chain->chain_key = chain_key; + list_add_tail_rcu(&chain->entry, hash_head); + debug_atomic_inc(&chain_lookup_misses); +#ifdef CONFIG_TRACE_IRQFLAGS + if (current->hardirq_context) + nr_hardirq_chains++; + else { + if (current->softirq_context) + nr_softirq_chains++; + else + nr_process_chains++; + } +#else + nr_process_chains++; +#endif + + return 1; +} + +/* + * We are building curr_chain_key incrementally, so double-check + * it from scratch, to make sure that it's done correctly: + */ +static void check_chain_key(struct task_struct *curr) +{ +#ifdef CONFIG_DEBUG_LOCKDEP + struct held_lock *hlock, *prev_hlock = NULL; + unsigned int i, id; + u64 chain_key = 0; + + for (i = 0; i < curr->lockdep_depth; i++) { + hlock = curr->held_locks + i; + if (chain_key != hlock->prev_chain_key) { + debug_locks_off(); + printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n", + curr->lockdep_depth, i, + (unsigned long long)chain_key, + (unsigned long long)hlock->prev_chain_key); + WARN_ON(1); + return; + } + id = hlock->class - lock_classes; + DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS); + if (prev_hlock && (prev_hlock->irq_context != + hlock->irq_context)) + chain_key = 0; + chain_key = iterate_chain_key(chain_key, id); + prev_hlock = hlock; + } + if (chain_key != curr->curr_chain_key) { + debug_locks_off(); + printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n", + curr->lockdep_depth, i, + (unsigned long long)chain_key, + (unsigned long long)curr->curr_chain_key); + WARN_ON(1); + } +#endif +} + +#ifdef CONFIG_TRACE_IRQFLAGS + +/* + * print irq inversion bug: + */ +static int +print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other, + struct held_lock *this, int forwards, + const char *irqclass) +{ + __raw_spin_unlock(&hash_lock); + debug_locks_off(); + if (debug_locks_silent) + return 0; + + printk("\n=========================================================\n"); + printk( "[ INFO: possible irq lock inversion dependency detected ]\n"); + printk( "---------------------------------------------------------\n"); + printk("%s/%d just changed the state of lock:\n", + curr->comm, curr->pid); + print_lock(this); + if (forwards) + printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass); + else + printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", 
irqclass); + print_lock_name(other); + printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); + + printk("\nother info that might help us debug this:\n"); + lockdep_print_held_locks(curr); + + printk("\nthe first lock's dependencies:\n"); + print_lock_dependencies(this->class, 0); + + printk("\nthe second lock's dependencies:\n"); + print_lock_dependencies(other, 0); + + printk("\nstack backtrace:\n"); + dump_stack(); + + return 0; +} + +/* + * Prove that in the forwards-direction subgraph starting at + * there is no lock matching : + */ +static int +check_usage_forwards(struct task_struct *curr, struct held_lock *this, + enum lock_usage_bit bit, const char *irqclass) +{ + int ret; + + find_usage_bit = bit; + /* fills in */ + ret = find_usage_forwards(this->class, 0); + if (!ret || ret == 1) + return ret; + + return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass); +} + +/* + * Prove that in the backwards-direction subgraph starting at + * there is no lock matching : + */ +static int +check_usage_backwards(struct task_struct *curr, struct held_lock *this, + enum lock_usage_bit bit, const char *irqclass) +{ + int ret; + + find_usage_bit = bit; + /* fills in */ + ret = find_usage_backwards(this->class, 0); + if (!ret || ret == 1) + return ret; + + return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass); +} + +static inline void print_irqtrace_events(struct task_struct *curr) +{ + printk("irq event stamp: %u\n", curr->irq_events); + printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event); + print_ip_sym(curr->hardirq_enable_ip); + printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event); + print_ip_sym(curr->hardirq_disable_ip); + printk("softirqs last enabled at (%u): ", curr->softirq_enable_event); + print_ip_sym(curr->softirq_enable_ip); + printk("softirqs last disabled at (%u): ", curr->softirq_disable_event); + print_ip_sym(curr->softirq_disable_ip); +} + +#else +static inline void print_irqtrace_events(struct task_struct *curr) +{ +} +#endif + +static int +print_usage_bug(struct task_struct *curr, struct held_lock *this, + enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) +{ + __raw_spin_unlock(&hash_lock); + debug_locks_off(); + if (debug_locks_silent) + return 0; + + printk("\n=================================\n"); + printk( "[ INFO: inconsistent lock state ]\n"); + printk( "---------------------------------\n"); + + printk("inconsistent {%s} -> {%s} usage.\n", + usage_str[prev_bit], usage_str[new_bit]); + + printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n", + curr->comm, curr->pid, + trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, + trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, + trace_hardirqs_enabled(curr), + trace_softirqs_enabled(curr)); + print_lock(this); + + printk("{%s} state was registered at:\n", usage_str[prev_bit]); + print_stack_trace(this->class->usage_traces + prev_bit, 1); + + print_irqtrace_events(curr); + printk("\nother info that might help us debug this:\n"); + lockdep_print_held_locks(curr); + + printk("\nstack backtrace:\n"); + dump_stack(); + + return 0; +} + +/* + * Print out an error if an invalid bit is set: + */ +static inline int +valid_state(struct task_struct *curr, struct held_lock *this, + enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) +{ + if (unlikely(this->class->usage_mask & (1 << bad_bit))) + return print_usage_bug(curr, this, bad_bit, new_bit); + return 1; +} + +#define STRICT_READ_CHECKS 1 + +/* + * 
Mark a lock with a usage bit, and validate the state transition: + */ +static int mark_lock(struct task_struct *curr, struct held_lock *this, + enum lock_usage_bit new_bit, unsigned long ip) +{ + unsigned int new_mask = 1 << new_bit, ret = 1; + + /* + * If already set then do not dirty the cacheline, + * nor do any checks: + */ + if (likely(this->class->usage_mask & new_mask)) + return 1; + + __raw_spin_lock(&hash_lock); + /* + * Make sure we didnt race: + */ + if (unlikely(this->class->usage_mask & new_mask)) { + __raw_spin_unlock(&hash_lock); + return 1; + } + + this->class->usage_mask |= new_mask; + +#ifdef CONFIG_TRACE_IRQFLAGS + if (new_bit == LOCK_ENABLED_HARDIRQS || + new_bit == LOCK_ENABLED_HARDIRQS_READ) + ip = curr->hardirq_enable_ip; + else if (new_bit == LOCK_ENABLED_SOFTIRQS || + new_bit == LOCK_ENABLED_SOFTIRQS_READ) + ip = curr->softirq_enable_ip; +#endif + if (!save_trace(this->class->usage_traces + new_bit)) + return 0; + + switch (new_bit) { +#ifdef CONFIG_TRACE_IRQFLAGS + case LOCK_USED_IN_HARDIRQ: + if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS)) + return 0; + if (!valid_state(curr, this, new_bit, + LOCK_ENABLED_HARDIRQS_READ)) + return 0; + /* + * just marked it hardirq-safe, check that this lock + * took no hardirq-unsafe lock in the past: + */ + if (!check_usage_forwards(curr, this, + LOCK_ENABLED_HARDIRQS, "hard")) + return 0; +#if STRICT_READ_CHECKS + /* + * just marked it hardirq-safe, check that this lock + * took no hardirq-unsafe-read lock in the past: + */ + if (!check_usage_forwards(curr, this, + LOCK_ENABLED_HARDIRQS_READ, "hard-read")) + return 0; +#endif + if (hardirq_verbose(this->class)) + ret = 2; + break; + case LOCK_USED_IN_SOFTIRQ: + if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS)) + return 0; + if (!valid_state(curr, this, new_bit, + LOCK_ENABLED_SOFTIRQS_READ)) + return 0; + /* + * just marked it softirq-safe, check that this lock + * took no softirq-unsafe lock in the past: + */ + if (!check_usage_forwards(curr, this, + LOCK_ENABLED_SOFTIRQS, "soft")) + return 0; +#if STRICT_READ_CHECKS + /* + * just marked it softirq-safe, check that this lock + * took no softirq-unsafe-read lock in the past: + */ + if (!check_usage_forwards(curr, this, + LOCK_ENABLED_SOFTIRQS_READ, "soft-read")) + return 0; +#endif + if (softirq_verbose(this->class)) + ret = 2; + break; + case LOCK_USED_IN_HARDIRQ_READ: + if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS)) + return 0; + /* + * just marked it hardirq-read-safe, check that this lock + * took no hardirq-unsafe lock in the past: + */ + if (!check_usage_forwards(curr, this, + LOCK_ENABLED_HARDIRQS, "hard")) + return 0; + if (hardirq_verbose(this->class)) + ret = 2; + break; + case LOCK_USED_IN_SOFTIRQ_READ: + if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS)) + return 0; + /* + * just marked it softirq-read-safe, check that this lock + * took no softirq-unsafe lock in the past: + */ + if (!check_usage_forwards(curr, this, + LOCK_ENABLED_SOFTIRQS, "soft")) + return 0; + if (softirq_verbose(this->class)) + ret = 2; + break; + case LOCK_ENABLED_HARDIRQS: + if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ)) + return 0; + if (!valid_state(curr, this, new_bit, + LOCK_USED_IN_HARDIRQ_READ)) + return 0; + /* + * just marked it hardirq-unsafe, check that no hardirq-safe + * lock in the system ever took it in the past: + */ + if (!check_usage_backwards(curr, this, + LOCK_USED_IN_HARDIRQ, "hard")) + return 0; +#if STRICT_READ_CHECKS + /* + * just marked it 
hardirq-unsafe, check that no + * hardirq-safe-read lock in the system ever took + * it in the past: + */ + if (!check_usage_backwards(curr, this, + LOCK_USED_IN_HARDIRQ_READ, "hard-read")) + return 0; +#endif + if (hardirq_verbose(this->class)) + ret = 2; + break; + case LOCK_ENABLED_SOFTIRQS: + if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ)) + return 0; + if (!valid_state(curr, this, new_bit, + LOCK_USED_IN_SOFTIRQ_READ)) + return 0; + /* + * just marked it softirq-unsafe, check that no softirq-safe + * lock in the system ever took it in the past: + */ + if (!check_usage_backwards(curr, this, + LOCK_USED_IN_SOFTIRQ, "soft")) + return 0; +#if STRICT_READ_CHECKS + /* + * just marked it softirq-unsafe, check that no + * softirq-safe-read lock in the system ever took + * it in the past: + */ + if (!check_usage_backwards(curr, this, + LOCK_USED_IN_SOFTIRQ_READ, "soft-read")) + return 0; +#endif + if (softirq_verbose(this->class)) + ret = 2; + break; + case LOCK_ENABLED_HARDIRQS_READ: + if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ)) + return 0; +#if STRICT_READ_CHECKS + /* + * just marked it hardirq-read-unsafe, check that no + * hardirq-safe lock in the system ever took it in the past: + */ + if (!check_usage_backwards(curr, this, + LOCK_USED_IN_HARDIRQ, "hard")) + return 0; +#endif + if (hardirq_verbose(this->class)) + ret = 2; + break; + case LOCK_ENABLED_SOFTIRQS_READ: + if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ)) + return 0; +#if STRICT_READ_CHECKS + /* + * just marked it softirq-read-unsafe, check that no + * softirq-safe lock in the system ever took it in the past: + */ + if (!check_usage_backwards(curr, this, + LOCK_USED_IN_SOFTIRQ, "soft")) + return 0; +#endif + if (softirq_verbose(this->class)) + ret = 2; + break; +#endif + case LOCK_USED: + /* + * Add it to the global list of classes: + */ + list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes); + debug_atomic_dec(&nr_unused_locks); + break; + default: + debug_locks_off(); + WARN_ON(1); + return 0; + } + + __raw_spin_unlock(&hash_lock); + + /* + * We must printk outside of the hash_lock: + */ + if (ret == 2) { + printk("\nmarked lock as {%s}:\n", usage_str[new_bit]); + print_lock(this); + print_irqtrace_events(curr); + dump_stack(); + } + + return ret; +} + +#ifdef CONFIG_TRACE_IRQFLAGS +/* + * Mark all held locks with a usage bit: + */ +static int +mark_held_locks(struct task_struct *curr, int hardirq, unsigned long ip) +{ + enum lock_usage_bit usage_bit; + struct held_lock *hlock; + int i; + + for (i = 0; i < curr->lockdep_depth; i++) { + hlock = curr->held_locks + i; + + if (hardirq) { + if (hlock->read) + usage_bit = LOCK_ENABLED_HARDIRQS_READ; + else + usage_bit = LOCK_ENABLED_HARDIRQS; + } else { + if (hlock->read) + usage_bit = LOCK_ENABLED_SOFTIRQS_READ; + else + usage_bit = LOCK_ENABLED_SOFTIRQS; + } + if (!mark_lock(curr, hlock, usage_bit, ip)) + return 0; + } + + return 1; +} + +/* + * Debugging helper: via this flag we know that we are in + * 'early bootup code', and will warn about any invalid irqs-on event: + */ +static int early_boot_irqs_enabled; + +void early_boot_irqs_off(void) +{ + early_boot_irqs_enabled = 0; +} + +void early_boot_irqs_on(void) +{ + early_boot_irqs_enabled = 1; +} + +/* + * Hardirqs will be enabled: + */ +void trace_hardirqs_on(void) +{ + struct task_struct *curr = current; + unsigned long ip; + + if (unlikely(!debug_locks || current->lockdep_recursion)) + return; + + if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled))) + return; + + if 
(unlikely(curr->hardirqs_enabled)) { + debug_atomic_inc(&redundant_hardirqs_on); + return; + } + /* we'll do an OFF -> ON transition: */ + curr->hardirqs_enabled = 1; + ip = (unsigned long) __builtin_return_address(0); + + if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) + return; + if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) + return; + /* + * We are going to turn hardirqs on, so set the + * usage bit for all held locks: + */ + if (!mark_held_locks(curr, 1, ip)) + return; + /* + * If we have softirqs enabled, then set the usage + * bit for all held locks. (disabled hardirqs prevented + * this bit from being set before) + */ + if (curr->softirqs_enabled) + if (!mark_held_locks(curr, 0, ip)) + return; + + curr->hardirq_enable_ip = ip; + curr->hardirq_enable_event = ++curr->irq_events; + debug_atomic_inc(&hardirqs_on_events); +} + +EXPORT_SYMBOL(trace_hardirqs_on); + +/* + * Hardirqs were disabled: + */ +void trace_hardirqs_off(void) +{ + struct task_struct *curr = current; + + if (unlikely(!debug_locks || current->lockdep_recursion)) + return; + + if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) + return; + + if (curr->hardirqs_enabled) { + /* + * We have done an ON -> OFF transition: + */ + curr->hardirqs_enabled = 0; + curr->hardirq_disable_ip = _RET_IP_; + curr->hardirq_disable_event = ++curr->irq_events; + debug_atomic_inc(&hardirqs_off_events); + } else + debug_atomic_inc(&redundant_hardirqs_off); +} + +EXPORT_SYMBOL(trace_hardirqs_off); + +/* + * Softirqs will be enabled: + */ +void trace_softirqs_on(unsigned long ip) +{ + struct task_struct *curr = current; + + if (unlikely(!debug_locks)) + return; + + if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) + return; + + if (curr->softirqs_enabled) { + debug_atomic_inc(&redundant_softirqs_on); + return; + } + + /* + * We'll do an OFF -> ON transition: + */ + curr->softirqs_enabled = 1; + curr->softirq_enable_ip = ip; + curr->softirq_enable_event = ++curr->irq_events; + debug_atomic_inc(&softirqs_on_events); + /* + * We are going to turn softirqs on, so set the + * usage bit for all held locks, if hardirqs are + * enabled too: + */ + if (curr->hardirqs_enabled) + mark_held_locks(curr, 0, ip); +} + +/* + * Softirqs were disabled: + */ +void trace_softirqs_off(unsigned long ip) +{ + struct task_struct *curr = current; + + if (unlikely(!debug_locks)) + return; + + if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) + return; + + if (curr->softirqs_enabled) { + /* + * We have done an ON -> OFF transition: + */ + curr->softirqs_enabled = 0; + curr->softirq_disable_ip = ip; + curr->softirq_disable_event = ++curr->irq_events; + debug_atomic_inc(&softirqs_off_events); + DEBUG_LOCKS_WARN_ON(!softirq_count()); + } else + debug_atomic_inc(&redundant_softirqs_off); +} + +#endif + +/* + * Initialize a lock instance's lock-class mapping info: + */ +void lockdep_init_map(struct lockdep_map *lock, const char *name, + struct lock_class_key *key) +{ + if (unlikely(!debug_locks)) + return; + + if (DEBUG_LOCKS_WARN_ON(!key)) + return; + if (DEBUG_LOCKS_WARN_ON(!name)) + return; + /* + * Sanity check, the lock-class key must be persistent: + */ + if (!static_obj(key)) { + printk("BUG: key %p not in .data!\n", key); + DEBUG_LOCKS_WARN_ON(1); + return; + } + lock->name = name; + lock->key = key; + memset(lock->class, 0, sizeof(lock->class[0])*MAX_LOCKDEP_SUBCLASSES); +} + +EXPORT_SYMBOL_GPL(lockdep_init_map); + +/* + * This gets called for every mutex_lock*()/spin_lock*() operation. 
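Callers do not reach this function directly: lock primitives go through lock_acquire()/lock_release(), typically via the spin_acquire()-style mapping macros from the header earlier in this patch. A schematic of that wiring (my_spinlock_t, dep_map, arch_lock_t and the do_raw_*() helpers are hypothetical; the real per-primitive glue lives in the respective lock headers):

	/* Hypothetical lock type for the sketch: */
	typedef struct {
		arch_lock_t raw;		/* hypothetical arch-level lock */
		struct lockdep_map dep_map;	/* the per-lock lockdep handle */
	} my_spinlock_t;

	static inline void my_spin_lock(my_spinlock_t *lock)
	{
		/* Tell the validator first, so bad ordering is reported even
		 * if the raw acquire below would block forever: */
		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
		do_raw_spin_lock(&lock->raw);
	}

	static inline void my_spin_unlock(my_spinlock_t *lock)
	{
		spin_release(&lock->dep_map, 1, _RET_IP_);	/* nested unlock */
		do_raw_spin_unlock(&lock->raw);
	}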
+ * We maintain the dependency maps and validate the locking attempt: + */ +static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, + int trylock, int read, int check, int hardirqs_off, + unsigned long ip) +{ + struct task_struct *curr = current; + struct held_lock *hlock; + struct lock_class *class; + unsigned int depth, id; + int chain_head = 0; + u64 chain_key; + + if (unlikely(!debug_locks)) + return 0; + + if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) + return 0; + + if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { + debug_locks_off(); + printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n"); + printk("turning off the locking correctness validator.\n"); + return 0; + } + + class = lock->class[subclass]; + /* not cached yet? */ + if (unlikely(!class)) { + class = register_lock_class(lock, subclass); + if (!class) + return 0; + } + debug_atomic_inc((atomic_t *)&class->ops); + if (very_verbose(class)) { + printk("\nacquire class [%p] %s", class->key, class->name); + if (class->name_version > 1) + printk("#%d", class->name_version); + printk("\n"); + dump_stack(); + } + + /* + * Add the lock to the list of currently held locks. + * (we dont increase the depth just yet, up until the + * dependency checks are done) + */ + depth = curr->lockdep_depth; + if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) + return 0; + + hlock = curr->held_locks + depth; + + hlock->class = class; + hlock->acquire_ip = ip; + hlock->instance = lock; + hlock->trylock = trylock; + hlock->read = read; + hlock->check = check; + hlock->hardirqs_off = hardirqs_off; + + if (check != 2) + goto out_calc_hash; +#ifdef CONFIG_TRACE_IRQFLAGS + /* + * If non-trylock use in a hardirq or softirq context, then + * mark the lock as used in these contexts: + */ + if (!trylock) { + if (read) { + if (curr->hardirq_context) + if (!mark_lock(curr, hlock, + LOCK_USED_IN_HARDIRQ_READ, ip)) + return 0; + if (curr->softirq_context) + if (!mark_lock(curr, hlock, + LOCK_USED_IN_SOFTIRQ_READ, ip)) + return 0; + } else { + if (curr->hardirq_context) + if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ, ip)) + return 0; + if (curr->softirq_context) + if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ, ip)) + return 0; + } + } + if (!hardirqs_off) { + if (read) { + if (!mark_lock(curr, hlock, + LOCK_ENABLED_HARDIRQS_READ, ip)) + return 0; + if (curr->softirqs_enabled) + if (!mark_lock(curr, hlock, + LOCK_ENABLED_SOFTIRQS_READ, ip)) + return 0; + } else { + if (!mark_lock(curr, hlock, + LOCK_ENABLED_HARDIRQS, ip)) + return 0; + if (curr->softirqs_enabled) + if (!mark_lock(curr, hlock, + LOCK_ENABLED_SOFTIRQS, ip)) + return 0; + } + } +#endif + /* mark it as used: */ + if (!mark_lock(curr, hlock, LOCK_USED, ip)) + return 0; +out_calc_hash: + /* + * Calculate the chain hash: it's the combined has of all the + * lock keys along the dependency chain. We save the hash value + * at every step so that we can get the current hash easily + * after unlock. The chain hash is then used to cache dependency + * results. + * + * The 'key ID' is what is the most compact key value to drive + * the hash, not class->key. + */ + id = class - lock_classes; + if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) + return 0; + + chain_key = curr->curr_chain_key; + if (!depth) { + if (DEBUG_LOCKS_WARN_ON(chain_key != 0)) + return 0; + chain_head = 1; + } + + hlock->prev_chain_key = chain_key; + +#ifdef CONFIG_TRACE_IRQFLAGS + /* + * Keep track of points where we cross into an interrupt context: + */ + hlock->irq_context = 2*(curr->hardirq_context ? 
1 : 0) + + curr->softirq_context; + if (depth) { + struct held_lock *prev_hlock; + + prev_hlock = curr->held_locks + depth-1; + /* + * If we cross into another context, reset the + * hash key (this also prevents the checking and the + * adding of the dependency to 'prev'): + */ + if (prev_hlock->irq_context != hlock->irq_context) { + chain_key = 0; + chain_head = 1; + } + } +#endif + chain_key = iterate_chain_key(chain_key, id); + curr->curr_chain_key = chain_key; + + /* + * Trylock needs to maintain the stack of held locks, but it + * does not add new dependencies, because trylock can be done + * in any order. + * + * We look up the chain_key and do the O(N^2) check and update of + * the dependencies only if this is a new dependency chain. + * (If lookup_chain_cache() returns with 1 it acquires + * hash_lock for us) + */ + if (!trylock && (check == 2) && lookup_chain_cache(chain_key)) { + /* + * Check whether last held lock: + * + * - is irq-safe, if this lock is irq-unsafe + * - is softirq-safe, if this lock is hardirq-unsafe + * + * And check whether the new lock's dependency graph + * could lead back to the previous lock. + * + * any of these scenarios could lead to a deadlock. If + * All validations + */ + int ret = check_deadlock(curr, hlock, lock, read); + + if (!ret) + return 0; + /* + * Mark recursive read, as we jump over it when + * building dependencies (just like we jump over + * trylock entries): + */ + if (ret == 2) + hlock->read = 2; + /* + * Add dependency only if this lock is not the head + * of the chain, and if it's not a secondary read-lock: + */ + if (!chain_head && ret != 2) + if (!check_prevs_add(curr, hlock)) + return 0; + __raw_spin_unlock(&hash_lock); + } + curr->lockdep_depth++; + check_chain_key(curr); + if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { + debug_locks_off(); + printk("BUG: MAX_LOCK_DEPTH too low!\n"); + printk("turning off the locking correctness validator.\n"); + return 0; + } + if (unlikely(curr->lockdep_depth > max_lockdep_depth)) + max_lockdep_depth = curr->lockdep_depth; + + return 1; +} + +static int +print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock, + unsigned long ip) +{ + if (!debug_locks_off()) + return 0; + if (debug_locks_silent) + return 0; + + printk("\n=====================================\n"); + printk( "[ BUG: bad unlock balance detected! ]\n"); + printk( "-------------------------------------\n"); + printk("%s/%d is trying to release lock (", + curr->comm, curr->pid); + print_lockdep_cache(lock); + printk(") at:\n"); + print_ip_sym(ip); + printk("but there are no more locks to release!\n"); + printk("\nother info that might help us debug this:\n"); + lockdep_print_held_locks(curr); + + printk("\nstack backtrace:\n"); + dump_stack(); + + return 0; +} + +/* + * Common debugging checks for both nested and non-nested unlock: + */ +static int check_unlock(struct task_struct *curr, struct lockdep_map *lock, + unsigned long ip) +{ + if (unlikely(!debug_locks)) + return 0; + if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) + return 0; + + if (curr->lockdep_depth <= 0) + return print_unlock_inbalance_bug(curr, lock, ip); + + return 1; +} + +/* + * Remove the lock to the list of currently held locks in a + * potentially non-nested (out of order) manner. 
This is a + * relatively rare operation, as all the unlock APIs default + * to nested mode (which uses lock_release()): + */ +static int +lock_release_non_nested(struct task_struct *curr, + struct lockdep_map *lock, unsigned long ip) +{ + struct held_lock *hlock, *prev_hlock; + unsigned int depth; + int i; + + /* + * Check whether the lock exists in the current stack + * of held locks: + */ + depth = curr->lockdep_depth; + if (DEBUG_LOCKS_WARN_ON(!depth)) + return 0; + + prev_hlock = NULL; + for (i = depth-1; i >= 0; i--) { + hlock = curr->held_locks + i; + /* + * We must not cross into another context: + */ + if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) + break; + if (hlock->instance == lock) + goto found_it; + prev_hlock = hlock; + } + return print_unlock_inbalance_bug(curr, lock, ip); + +found_it: + /* + * We have the right lock to unlock, 'hlock' points to it. + * Now we remove it from the stack, and add back the other + * entries (if any), recalculating the hash along the way: + */ + curr->lockdep_depth = i; + curr->curr_chain_key = hlock->prev_chain_key; + + for (i++; i < depth; i++) { + hlock = curr->held_locks + i; + if (!__lock_acquire(hlock->instance, + hlock->class->subclass, hlock->trylock, + hlock->read, hlock->check, hlock->hardirqs_off, + hlock->acquire_ip)) + return 0; + } + + if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) + return 0; + return 1; +} + +/* + * Remove the lock to the list of currently held locks - this gets + * called on mutex_unlock()/spin_unlock*() (or on a failed + * mutex_lock_interruptible()). This is done for unlocks that nest + * perfectly. (i.e. the current top of the lock-stack is unlocked) + */ +static int lock_release_nested(struct task_struct *curr, + struct lockdep_map *lock, unsigned long ip) +{ + struct held_lock *hlock; + unsigned int depth; + + /* + * Pop off the top of the lock stack: + */ + depth = curr->lockdep_depth - 1; + hlock = curr->held_locks + depth; + + /* + * Is the unlock non-nested: + */ + if (hlock->instance != lock) + return lock_release_non_nested(curr, lock, ip); + curr->lockdep_depth--; + + if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0))) + return 0; + + curr->curr_chain_key = hlock->prev_chain_key; + +#ifdef CONFIG_DEBUG_LOCKDEP + hlock->prev_chain_key = 0; + hlock->class = NULL; + hlock->acquire_ip = 0; + hlock->irq_context = 0; +#endif + return 1; +} + +/* + * Remove the lock to the list of currently held locks - this gets + * called on mutex_unlock()/spin_unlock*() (or on a failed + * mutex_lock_interruptible()). This is done for unlocks that nest + * perfectly. (i.e. the current top of the lock-stack is unlocked) + */ +static void +__lock_release(struct lockdep_map *lock, int nested, unsigned long ip) +{ + struct task_struct *curr = current; + + if (!check_unlock(curr, lock, ip)) + return; + + if (nested) { + if (!lock_release_nested(curr, lock, ip)) + return; + } else { + if (!lock_release_non_nested(curr, lock, ip)) + return; + } + + check_chain_key(curr); +} + +/* + * Check whether we follow the irq-flags state precisely: + */ +static void check_flags(unsigned long flags) +{ +#if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS) + if (!debug_locks) + return; + + if (irqs_disabled_flags(flags)) + DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled); + else + DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled); + + /* + * We dont accurately track softirq state in e.g. 
+ * hardirq contexts (such as on 4KSTACKS), so only + * check if not in hardirq contexts: + */ + if (!hardirq_count()) { + if (softirq_count()) + DEBUG_LOCKS_WARN_ON(current->softirqs_enabled); + else + DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); + } + + if (!debug_locks) + print_irqtrace_events(current); +#endif +} + +/* + * We are not always called with irqs disabled - do that here, + * and also avoid lockdep recursion: + */ +void lock_acquire(struct lockdep_map *lock, unsigned int subclass, + int trylock, int read, int check, unsigned long ip) +{ + unsigned long flags; + + if (unlikely(current->lockdep_recursion)) + return; + + raw_local_irq_save(flags); + check_flags(flags); + + current->lockdep_recursion = 1; + __lock_acquire(lock, subclass, trylock, read, check, + irqs_disabled_flags(flags), ip); + current->lockdep_recursion = 0; + raw_local_irq_restore(flags); +} + +EXPORT_SYMBOL_GPL(lock_acquire); + +void lock_release(struct lockdep_map *lock, int nested, unsigned long ip) +{ + unsigned long flags; + + if (unlikely(current->lockdep_recursion)) + return; + + raw_local_irq_save(flags); + check_flags(flags); + current->lockdep_recursion = 1; + __lock_release(lock, nested, ip); + current->lockdep_recursion = 0; + raw_local_irq_restore(flags); +} + +EXPORT_SYMBOL_GPL(lock_release); + +/* + * Used by the testsuite, sanitize the validator state + * after a simulated failure: + */ + +void lockdep_reset(void) +{ + unsigned long flags; + + raw_local_irq_save(flags); + current->curr_chain_key = 0; + current->lockdep_depth = 0; + current->lockdep_recursion = 0; + memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock)); + nr_hardirq_chains = 0; + nr_softirq_chains = 0; + nr_process_chains = 0; + debug_locks = 1; + raw_local_irq_restore(flags); +} + +static void zap_class(struct lock_class *class) +{ + int i; + + /* + * Remove all dependencies this lock is + * involved in: + */ + for (i = 0; i < nr_list_entries; i++) { + if (list_entries[i].class == class) + list_del_rcu(&list_entries[i].entry); + } + /* + * Unhash the class and remove it from the all_lock_classes list: + */ + list_del_rcu(&class->hash_entry); + list_del_rcu(&class->lock_entry); + +} + +static inline int within(void *addr, void *start, unsigned long size) +{ + return addr >= start && addr < start + size; +} + +void lockdep_free_key_range(void *start, unsigned long size) +{ + struct lock_class *class, *next; + struct list_head *head; + unsigned long flags; + int i; + + raw_local_irq_save(flags); + __raw_spin_lock(&hash_lock); + + /* + * Unhash all classes that were created by this module: + */ + for (i = 0; i < CLASSHASH_SIZE; i++) { + head = classhash_table + i; + if (list_empty(head)) + continue; + list_for_each_entry_safe(class, next, head, hash_entry) + if (within(class->key, start, size)) + zap_class(class); + } + + __raw_spin_unlock(&hash_lock); + raw_local_irq_restore(flags); +} + +void lockdep_reset_lock(struct lockdep_map *lock) +{ + struct lock_class *class, *next, *entry; + struct list_head *head; + unsigned long flags; + int i, j; + + raw_local_irq_save(flags); + __raw_spin_lock(&hash_lock); + + /* + * Remove all classes this lock has: + */ + for (i = 0; i < CLASSHASH_SIZE; i++) { + head = classhash_table + i; + if (list_empty(head)) + continue; + list_for_each_entry_safe(class, next, head, hash_entry) { + for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) { + entry = lock->class[j]; + if (class == entry) { + zap_class(class); + lock->class[j] = NULL; + break; + } + } + } + } + + /* + * Debug check: in 
the end all mapped classes should + * be gone. + */ + for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) { + entry = lock->class[j]; + if (!entry) + continue; + __raw_spin_unlock(&hash_lock); + DEBUG_LOCKS_WARN_ON(1); + raw_local_irq_restore(flags); + return; + } + + __raw_spin_unlock(&hash_lock); + raw_local_irq_restore(flags); +} + +void __init lockdep_init(void) +{ + int i; + + /* + * Some architectures have their own start_kernel() + * code which calls lockdep_init(), while we also + * call lockdep_init() from the start_kernel() itself, + * and we want to initialize the hashes only once: + */ + if (lockdep_initialized) + return; + + for (i = 0; i < CLASSHASH_SIZE; i++) + INIT_LIST_HEAD(classhash_table + i); + + for (i = 0; i < CHAINHASH_SIZE; i++) + INIT_LIST_HEAD(chainhash_table + i); + + lockdep_initialized = 1; +} + +void __init lockdep_info(void) +{ + printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n"); + + printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES); + printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH); + printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS); + printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE); + printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES); + printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS); + printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE); + + printk(" memory used by lock dependency info: %lu kB\n", + (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS + + sizeof(struct list_head) * CLASSHASH_SIZE + + sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES + + sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS + + sizeof(struct list_head) * CHAINHASH_SIZE) / 1024); + + printk(" per task-struct memory footprint: %lu bytes\n", + sizeof(struct held_lock) * MAX_LOCK_DEPTH); + +#ifdef CONFIG_DEBUG_LOCKDEP + if (lockdep_init_error) + printk("WARNING: lockdep init error! Arch code didnt call lockdep_init() early enough?\n"); +#endif +} + +static inline int in_range(const void *start, const void *addr, const void *end) +{ + return addr >= start && addr <= end; +} + +static void +print_freed_lock_bug(struct task_struct *curr, const void *mem_from, + const void *mem_to) +{ + if (!debug_locks_off()) + return; + if (debug_locks_silent) + return; + + printk("\n=========================\n"); + printk( "[ BUG: held lock freed! 
]\n"); + printk( "-------------------------\n"); + printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n", + curr->comm, curr->pid, mem_from, mem_to-1); + lockdep_print_held_locks(curr); + + printk("\nstack backtrace:\n"); + dump_stack(); +} + +/* + * Called when kernel memory is freed (or unmapped), or if a lock + * is destroyed or reinitialized - this code checks whether there is + * any held lock in the memory range of to : + */ +void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) +{ + const void *mem_to = mem_from + mem_len, *lock_from, *lock_to; + struct task_struct *curr = current; + struct held_lock *hlock; + unsigned long flags; + int i; + + if (unlikely(!debug_locks)) + return; + + local_irq_save(flags); + for (i = 0; i < curr->lockdep_depth; i++) { + hlock = curr->held_locks + i; + + lock_from = (void *)hlock->instance; + lock_to = (void *)(hlock->instance + 1); + + if (!in_range(mem_from, lock_from, mem_to) && + !in_range(mem_from, lock_to, mem_to)) + continue; + + print_freed_lock_bug(curr, mem_from, mem_to); + break; + } + local_irq_restore(flags); +} + +static void print_held_locks_bug(struct task_struct *curr) +{ + if (!debug_locks_off()) + return; + if (debug_locks_silent) + return; + + printk("\n=====================================\n"); + printk( "[ BUG: lock held at task exit time! ]\n"); + printk( "-------------------------------------\n"); + printk("%s/%d is exiting with locks still held!\n", + curr->comm, curr->pid); + lockdep_print_held_locks(curr); + + printk("\nstack backtrace:\n"); + dump_stack(); +} + +void debug_check_no_locks_held(struct task_struct *task) +{ + if (unlikely(task->lockdep_depth > 0)) + print_held_locks_bug(task); +} + +void debug_show_all_locks(void) +{ + struct task_struct *g, *p; + int count = 10; + int unlock = 1; + + printk("\nShowing all locks held in the system:\n"); + + /* + * Here we try to get the tasklist_lock as hard as possible, + * if not successful after 2 seconds we ignore it (but keep + * trying). This is to enable a debug printout even if a + * tasklist_lock-holding task deadlocks or crashes. + */ +retry: + if (!read_trylock(&tasklist_lock)) { + if (count == 10) + printk("hm, tasklist_lock locked, retrying... "); + if (count) { + count--; + printk(" #%d", 10-count); + mdelay(200); + goto retry; + } + printk(" ignoring it.\n"); + unlock = 0; + } + if (count != 10) + printk(" locked it.\n"); + + do_each_thread(g, p) { + if (p->lockdep_depth) + lockdep_print_held_locks(p); + if (!unlock) + if (read_trylock(&tasklist_lock)) + unlock = 1; + } while_each_thread(g, p); + + printk("\n"); + printk("=============================================\n\n"); + + if (unlock) + read_unlock(&tasklist_lock); +} + +EXPORT_SYMBOL_GPL(debug_show_all_locks); + +void debug_show_held_locks(struct task_struct *task) +{ + lockdep_print_held_locks(task); +} + +EXPORT_SYMBOL_GPL(debug_show_held_locks); + diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h new file mode 100644 index 000000000000..0d355f24fe04 --- /dev/null +++ b/kernel/lockdep_internals.h @@ -0,0 +1,78 @@ +/* + * kernel/lockdep_internals.h + * + * Runtime locking correctness validator + * + * lockdep subsystem internal functions and variables. + */ + +/* + * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies + * we track. 
+ * + * We use the per-lock dependency maps in two ways: we grow it by adding + * every to-be-taken lock to all currently held lock's own dependency + * table (if it's not there yet), and we check it for lock order + * conflicts and deadlocks. + */ +#define MAX_LOCKDEP_ENTRIES 8192UL + +#define MAX_LOCKDEP_KEYS_BITS 11 +#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) + +#define MAX_LOCKDEP_CHAINS_BITS 13 +#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) + +/* + * Stack-trace: tightly packed array of stack backtrace + * addresses. Protected by the hash_lock. + */ +#define MAX_STACK_TRACE_ENTRIES 131072UL + +extern struct list_head all_lock_classes; + +extern void +get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4); + +extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str); + +extern unsigned long nr_lock_classes; +extern unsigned long nr_list_entries; +extern unsigned long nr_lock_chains; +extern unsigned long nr_stack_trace_entries; + +extern unsigned int nr_hardirq_chains; +extern unsigned int nr_softirq_chains; +extern unsigned int nr_process_chains; +extern unsigned int max_lockdep_depth; +extern unsigned int max_recursion_depth; + +#ifdef CONFIG_DEBUG_LOCKDEP +/* + * Various lockdep statistics: + */ +extern atomic_t chain_lookup_hits; +extern atomic_t chain_lookup_misses; +extern atomic_t hardirqs_on_events; +extern atomic_t hardirqs_off_events; +extern atomic_t redundant_hardirqs_on; +extern atomic_t redundant_hardirqs_off; +extern atomic_t softirqs_on_events; +extern atomic_t softirqs_off_events; +extern atomic_t redundant_softirqs_on; +extern atomic_t redundant_softirqs_off; +extern atomic_t nr_unused_locks; +extern atomic_t nr_cyclic_checks; +extern atomic_t nr_cyclic_check_recursions; +extern atomic_t nr_find_usage_forwards_checks; +extern atomic_t nr_find_usage_forwards_recursions; +extern atomic_t nr_find_usage_backwards_checks; +extern atomic_t nr_find_usage_backwards_recursions; +# define debug_atomic_inc(ptr) atomic_inc(ptr) +# define debug_atomic_dec(ptr) atomic_dec(ptr) +# define debug_atomic_read(ptr) atomic_read(ptr) +#else +# define debug_atomic_inc(ptr) do { } while (0) +# define debug_atomic_dec(ptr) do { } while (0) +# define debug_atomic_read(ptr) 0 +#endif diff --git a/kernel/module.c b/kernel/module.c index 0351625767b1..35e1b1f859d7 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1121,6 +1121,9 @@ static void free_module(struct module *mod) if (mod->percpu) percpu_modfree(mod->percpu); + /* Free lock-classes: */ + lockdep_free_key_range(mod->module_core, mod->core_size); + /* Finally, free the core (containing the module structure) */ module_free(mod, mod->module_core); } diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 16021b09c184..16c2e98b7638 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -48,7 +48,7 @@ config DEBUG_KERNEL config LOG_BUF_SHIFT int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL range 12 21 - default 17 if S390 + default 17 if S390 || LOCKDEP default 16 if X86_NUMAQ || IA64 default 15 if SMP default 14 diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c index 5cd05f20bdec..9bdc8d440b2b 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -889,9 +890,6 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft) #include "locking-selftest-softirq.h" // GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft) -#define 
lockdep_reset() -#define lockdep_reset_lock(x) - #ifdef CONFIG_DEBUG_LOCK_ALLOC # define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map) # define I_RWLOCK(x) lockdep_reset_lock(&rwlock_##x.dep_map) -- cgit v1.2.3 From 4ea2176dfa714882e88180b474e4cbcd888b70af Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:24:53 -0700 Subject: [PATCH] lockdep: prove rwsem locking correctness Use the lock validator framework to prove rwsem locking correctness. Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-i386/rwsem.h | 35 +++++++++++++++++++------ include/asm-s390/rwsem.h | 31 ++++++++++++++++++++-- include/asm-s390/semaphore.h | 3 ++- include/linux/rwsem-spinlock.h | 23 ++++++++++++++-- include/linux/rwsem.h | 59 ++++++++++++++++++------------------------ kernel/Makefile | 2 +- kernel/rwsem.c | 42 ++++++++++++++++++++++++++++++ lib/rwsem-spinlock.c | 20 +++++++++++--- lib/rwsem.c | 20 ++++++++++++++ 9 files changed, 184 insertions(+), 51 deletions(-) (limited to 'include/linux') diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h index 558804e4a039..2f07601562e7 100644 --- a/include/asm-i386/rwsem.h +++ b/include/asm-i386/rwsem.h @@ -40,6 +40,7 @@ #include #include +#include struct rwsem_waiter; @@ -61,21 +62,34 @@ struct rw_semaphore { #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) spinlock_t wait_lock; struct list_head wait_list; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif }; +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +#else +# define __RWSEM_DEP_MAP_INIT(lockname) +#endif + + #define __RWSEM_INITIALIZER(name) \ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ - } + __RWSEM_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) -static inline void init_rwsem(struct rw_semaphore *sem) -{ - sem->count = RWSEM_UNLOCKED_VALUE; - spin_lock_init(&sem->wait_lock); - INIT_LIST_HEAD(&sem->wait_list); -} +extern void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key); + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) /* * lock for reading @@ -128,7 +142,7 @@ LOCK_PREFIX " cmpxchgl %2,%0\n\t" /* * lock for writing */ -static inline void __down_write(struct rw_semaphore *sem) +static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) { int tmp; @@ -152,6 +166,11 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the : "memory", "cc"); } +static inline void __down_write(struct rw_semaphore *sem) +{ + __down_write_nested(sem, 0); +} + /* * trylock for writing -- returns 1 if successful, 0 if contention */ diff --git a/include/asm-s390/rwsem.h b/include/asm-s390/rwsem.h index 0422a085dd56..13ec16965150 100644 --- a/include/asm-s390/rwsem.h +++ b/include/asm-s390/rwsem.h @@ -61,6 +61,9 @@ struct rw_semaphore { signed long count; spinlock_t wait_lock; struct list_head wait_list; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif }; #ifndef __s390x__ @@ -80,8 +83,16 @@ struct rw_semaphore { /* * initialisation */ + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +#else +# define __RWSEM_DEP_MAP_INIT(lockname) +#endif + #define __RWSEM_INITIALIZER(name) \ -{ 
RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) } +{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ + __RWSEM_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) @@ -93,6 +104,17 @@ static inline void init_rwsem(struct rw_semaphore *sem) INIT_LIST_HEAD(&sem->wait_list); } +extern void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key); + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) + + /* * lock for reading */ @@ -155,7 +177,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) /* * lock for writing */ -static inline void __down_write(struct rw_semaphore *sem) +static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) { signed long old, new, tmp; @@ -181,6 +203,11 @@ static inline void __down_write(struct rw_semaphore *sem) rwsem_down_write_failed(sem); } +static inline void __down_write(struct rw_semaphore *sem) +{ + __down_write_nested(sem, 0); +} + /* * trylock for writing -- returns 1 if successful, 0 if contention */ diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h index 702cf436698c..32cdc69f39f4 100644 --- a/include/asm-s390/semaphore.h +++ b/include/asm-s390/semaphore.h @@ -37,7 +37,8 @@ struct semaphore { static inline void sema_init (struct semaphore *sem, int val) { - *sem = (struct semaphore) __SEMAPHORE_INITIALIZER((*sem),val); + atomic_set(&sem->count, val); + init_waitqueue_head(&sem->wait); } static inline void init_MUTEX (struct semaphore *sem) diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h index d68afcc36ac9..ae1fcadd598e 100644 --- a/include/linux/rwsem-spinlock.h +++ b/include/linux/rwsem-spinlock.h @@ -32,18 +32,37 @@ struct rw_semaphore { __s32 activity; spinlock_t wait_lock; struct list_head wait_list; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif }; +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +#else +# define __RWSEM_DEP_MAP_INIT(lockname) +#endif + #define __RWSEM_INITIALIZER(name) \ -{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) } +{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) -extern void FASTCALL(init_rwsem(struct rw_semaphore *sem)); +extern void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key); + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) + extern void FASTCALL(__down_read(struct rw_semaphore *sem)); extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem)); extern void FASTCALL(__down_write(struct rw_semaphore *sem)); +extern void FASTCALL(__down_write_nested(struct rw_semaphore *sem, int subclass)); extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem)); extern void FASTCALL(__up_read(struct rw_semaphore *sem)); extern void FASTCALL(__up_write(struct rw_semaphore *sem)); diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 93581534b915..658afb37c3f5 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -27,64 +27,55 @@ struct rw_semaphore; /* * lock for reading */ -static inline void down_read(struct rw_semaphore *sem) -{ - might_sleep(); - 
__down_read(sem); -} +extern void down_read(struct rw_semaphore *sem); /* * trylock for reading -- returns 1 if successful, 0 if contention */ -static inline int down_read_trylock(struct rw_semaphore *sem) -{ - int ret; - ret = __down_read_trylock(sem); - return ret; -} +extern int down_read_trylock(struct rw_semaphore *sem); /* * lock for writing */ -static inline void down_write(struct rw_semaphore *sem) -{ - might_sleep(); - __down_write(sem); -} +extern void down_write(struct rw_semaphore *sem); /* * trylock for writing -- returns 1 if successful, 0 if contention */ -static inline int down_write_trylock(struct rw_semaphore *sem) -{ - int ret; - ret = __down_write_trylock(sem); - return ret; -} +extern int down_write_trylock(struct rw_semaphore *sem); /* * release a read lock */ -static inline void up_read(struct rw_semaphore *sem) -{ - __up_read(sem); -} +extern void up_read(struct rw_semaphore *sem); /* * release a write lock */ -static inline void up_write(struct rw_semaphore *sem) -{ - __up_write(sem); -} +extern void up_write(struct rw_semaphore *sem); /* * downgrade write lock to read lock */ -static inline void downgrade_write(struct rw_semaphore *sem) -{ - __downgrade_write(sem); -} +extern void downgrade_write(struct rw_semaphore *sem); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +/* + * nested locking: + */ +extern void down_read_nested(struct rw_semaphore *sem, int subclass); +extern void down_write_nested(struct rw_semaphore *sem, int subclass); +/* + * Take/release a lock when not the owner will release it: + */ +extern void down_read_non_owner(struct rw_semaphore *sem); +extern void up_read_non_owner(struct rw_semaphore *sem); +#else +# define down_read_nested(sem, subclass) down_read(sem) +# define down_write_nested(sem, subclass) down_write(sem) +# define down_read_non_owner(sem) down_read(sem) +# define up_read_non_owner(sem) up_read(sem) +#endif #endif /* __KERNEL__ */ #endif /* _LINUX_RWSEM_H */ diff --git a/kernel/Makefile b/kernel/Makefile index 3dd994eedc5c..df6ef3263699 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -8,7 +8,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ signal.o sys.o kmod.o workqueue.o pid.o \ rcupdate.o extable.o params.o posix-timers.o \ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ - hrtimer.o + hrtimer.o rwsem.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += time/ diff --git a/kernel/rwsem.c b/kernel/rwsem.c index 790a99bb25aa..291ded556aa0 100644 --- a/kernel/rwsem.c +++ b/kernel/rwsem.c @@ -103,3 +103,45 @@ void downgrade_write(struct rw_semaphore *sem) } EXPORT_SYMBOL(downgrade_write); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +void down_read_nested(struct rw_semaphore *sem, int subclass) +{ + might_sleep(); + rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_); + + __down_read(sem); +} + +EXPORT_SYMBOL(down_read_nested); + +void down_read_non_owner(struct rw_semaphore *sem) +{ + might_sleep(); + + __down_read(sem); +} + +EXPORT_SYMBOL(down_read_non_owner); + +void down_write_nested(struct rw_semaphore *sem, int subclass) +{ + might_sleep(); + rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_); + + __down_write_nested(sem, subclass); +} + +EXPORT_SYMBOL(down_write_nested); + +void up_read_non_owner(struct rw_semaphore *sem) +{ + __up_read(sem); +} + +EXPORT_SYMBOL(up_read_non_owner); + +#endif + + diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c index 03b6097eb04e..db4fed74b940 100644 --- a/lib/rwsem-spinlock.c +++ b/lib/rwsem-spinlock.c @@ -20,8 +20,16 @@ struct rwsem_waiter { /* * 
initialise the semaphore */ -void fastcall init_rwsem(struct rw_semaphore *sem) +void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key) { +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held semaphore: + */ + debug_check_no_locks_freed((void *)sem, sizeof(*sem)); + lockdep_init_map(&sem->dep_map, name, key); +#endif sem->activity = 0; spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->wait_list); @@ -183,7 +191,7 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem) * get a write lock on the semaphore * - we increment the waiting count anyway to indicate an exclusive lock */ -void fastcall __sched __down_write(struct rw_semaphore *sem) +void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass) { struct rwsem_waiter waiter; struct task_struct *tsk; @@ -223,6 +231,11 @@ void fastcall __sched __down_write(struct rw_semaphore *sem) ; } +void fastcall __sched __down_write(struct rw_semaphore *sem) +{ + __down_write_nested(sem, 0); +} + /* * trylock for writing -- returns 1 if successful, 0 if contention */ @@ -292,9 +305,10 @@ void fastcall __downgrade_write(struct rw_semaphore *sem) spin_unlock_irqrestore(&sem->wait_lock, flags); } -EXPORT_SYMBOL(init_rwsem); +EXPORT_SYMBOL(__init_rwsem); EXPORT_SYMBOL(__down_read); EXPORT_SYMBOL(__down_read_trylock); +EXPORT_SYMBOL(__down_write_nested); EXPORT_SYMBOL(__down_write); EXPORT_SYMBOL(__down_write_trylock); EXPORT_SYMBOL(__up_read); diff --git a/lib/rwsem.c b/lib/rwsem.c index bae597284889..b322421c2969 100644 --- a/lib/rwsem.c +++ b/lib/rwsem.c @@ -8,6 +8,26 @@ #include #include +/* + * Initialize an rwsem: + */ +void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held semaphore: + */ + debug_check_no_locks_freed((void *)sem, sizeof(*sem)); + lockdep_init_map(&sem->dep_map, name, key); +#endif + sem->count = RWSEM_UNLOCKED_VALUE; + spin_lock_init(&sem->wait_lock); + INIT_LIST_HEAD(&sem->wait_list); +} + +EXPORT_SYMBOL(__init_rwsem); + struct rwsem_waiter { struct list_head list; struct task_struct *task; -- cgit v1.2.3 From 8a25d5debff2daee280e83e09d8c25d67c26a972 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:24:54 -0700 Subject: [PATCH] lockdep: prove spinlock rwlock locking correctness Use the lock validator framework to prove spinlock and rwlock locking correctness. 
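As an illustration of the resulting API (the struct and function names below are made up; only the lock-init and lock-nesting calls are the ones this patch touches): a driver that only ever calls spin_lock_init() keeps working unchanged, because the init macro now attaches a static per-call-site lock class key behind the scenes, and code that legitimately nests two locks of the same class can say so with spin_lock_nested():

	#include <linux/spinlock.h>

	struct my_dev {
		spinlock_t	lock;
		int		count;
	};

	static void my_dev_init(struct my_dev *dev)
	{
		/*
		 * With CONFIG_DEBUG_SPINLOCK this now expands to
		 * __spin_lock_init(&dev->lock, "&dev->lock", &__key),
		 * where __key is a static per-call-site lock_class_key.
		 */
		spin_lock_init(&dev->lock);
		dev->count = 0;
	}

	static void my_dev_pair_update(struct my_dev *a, struct my_dev *b)
	{
		spin_lock(&a->lock);
		/*
		 * Second lock of the same class while the first is held:
		 * subclass 1 tells the validator that this nesting is
		 * intended.  The annotation only suppresses the same-class
		 * report; the caller must still guarantee a globally
		 * consistent order, e.g. by comparing addresses.
		 */
		spin_lock_nested(&b->lock, 1);
		a->count++;
		b->count++;
		spin_unlock(&b->lock);
		spin_unlock(&a->lock);
	}
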
Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-i386/spinlock.h | 7 ++++ include/linux/spinlock.h | 63 ++++++++++++++++++++++--------- include/linux/spinlock_api_smp.h | 2 + include/linux/spinlock_api_up.h | 1 + include/linux/spinlock_types.h | 32 ++++++++++++++-- include/linux/spinlock_types_up.h | 9 ++++- include/linux/spinlock_up.h | 1 - kernel/Makefile | 1 + kernel/sched.c | 10 +++++ kernel/spinlock.c | 79 ++++++++++++++++++++++++++++++++++----- lib/kernel_lock.c | 7 +++- lib/spinlock_debug.c | 36 ++++++++++++++++++ net/ipv4/route.c | 3 +- 13 files changed, 217 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h index 7e29b51bcaa0..87c40f830653 100644 --- a/include/asm-i386/spinlock.h +++ b/include/asm-i386/spinlock.h @@ -68,6 +68,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) "=m" (lock->slock) : : "memory"); } +/* + * It is easier for the lock validator if interrupts are not re-enabled + * in the middle of a lock-acquire. This is a performance feature anyway + * so we turn it off: + */ +#ifndef CONFIG_PROVE_LOCKING static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) { alternative_smp( @@ -75,6 +81,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla __raw_spin_lock_string_up, "=m" (lock->slock) : "r" (flags) : "memory"); } +#endif static inline int __raw_spin_trylock(raw_spinlock_t *lock) { diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index ae23beef9cc9..31473db92d3b 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -82,14 +82,40 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); /* * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them): */ -#if defined(CONFIG_SMP) +#ifdef CONFIG_SMP # include #else # include #endif -#define spin_lock_init(lock) do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) -#define rwlock_init(lock) do { *(lock) = RW_LOCK_UNLOCKED; } while (0) +#ifdef CONFIG_DEBUG_SPINLOCK + extern void __spin_lock_init(spinlock_t *lock, const char *name, + struct lock_class_key *key); +# define spin_lock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + __spin_lock_init((lock), #lock, &__key); \ +} while (0) + +#else +# define spin_lock_init(lock) \ + do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void __rwlock_init(rwlock_t *lock, const char *name, + struct lock_class_key *key); +# define rwlock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + __rwlock_init((lock), #lock, &__key); \ +} while (0) +#else +# define rwlock_init(lock) \ + do { *(lock) = RW_LOCK_UNLOCKED; } while (0) +#endif #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) @@ -113,7 +139,6 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) extern int _raw_spin_trylock(spinlock_t *lock); extern void _raw_spin_unlock(spinlock_t *lock); - extern void _raw_read_lock(rwlock_t *lock); extern int _raw_read_trylock(rwlock_t *lock); extern void _raw_read_unlock(rwlock_t *lock); @@ -121,17 +146,17 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); extern int _raw_write_trylock(rwlock_t *lock); extern void _raw_write_unlock(rwlock_t *lock); #else -# define _raw_spin_unlock(lock) 
__raw_spin_unlock(&(lock)->raw_lock) -# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) # define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) # define _raw_spin_lock_flags(lock, flags) \ __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) +# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) +# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) # define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) -# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) -# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) -# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) # define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) +# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) +# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) # define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) +# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) #endif #define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) @@ -147,6 +172,13 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); #define write_trylock(lock) __cond_lock(_write_trylock(lock)) #define spin_lock(lock) _spin_lock(lock) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) +#else +# define spin_lock_nested(lock, subclass) _spin_lock(lock) +#endif + #define write_lock(lock) _write_lock(lock) #define read_lock(lock) _read_lock(lock) @@ -172,21 +204,18 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); /* * We inline the unlock functions in the nondebug case: */ -#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) +#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \ + !defined(CONFIG_SMP) # define spin_unlock(lock) _spin_unlock(lock) # define read_unlock(lock) _read_unlock(lock) # define write_unlock(lock) _write_unlock(lock) -#else -# define spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) -# define read_unlock(lock) __raw_read_unlock(&(lock)->raw_lock) -# define write_unlock(lock) __raw_write_unlock(&(lock)->raw_lock) -#endif - -#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) # define spin_unlock_irq(lock) _spin_unlock_irq(lock) # define read_unlock_irq(lock) _read_unlock_irq(lock) # define write_unlock_irq(lock) _write_unlock_irq(lock) #else +# define spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) +# define read_unlock(lock) __raw_read_unlock(&(lock)->raw_lock) +# define write_unlock(lock) __raw_write_unlock(&(lock)->raw_lock) # define spin_unlock_irq(lock) \ do { __raw_spin_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0) # define read_unlock_irq(lock) \ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 78e6989ffb54..b2c4f8299464 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -20,6 +20,8 @@ int in_lock_functions(unsigned long addr); #define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) void __lockfunc _spin_lock(spinlock_t *lock) __acquires(spinlock_t); +void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) + __acquires(spinlock_t); void __lockfunc _read_lock(rwlock_t *lock) __acquires(rwlock_t); void __lockfunc _write_lock(rwlock_t *lock) __acquires(rwlock_t); void __lockfunc _spin_lock_bh(spinlock_t 
*lock) __acquires(spinlock_t); diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h index cd81cee566f4..67faa044c5f5 100644 --- a/include/linux/spinlock_api_up.h +++ b/include/linux/spinlock_api_up.h @@ -49,6 +49,7 @@ do { local_irq_restore(flags); __UNLOCK(lock); } while (0) #define _spin_lock(lock) __LOCK(lock) +#define _spin_lock_nested(lock, subclass) __LOCK(lock) #define _read_lock(lock) __LOCK(lock) #define _write_lock(lock) __LOCK(lock) #define _spin_lock_bh(lock) __LOCK_BH(lock) diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index f5d4ed7bc785..dc5fb69e4de9 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -9,6 +9,8 @@ * Released under the General Public License (GPL). */ +#include + #if defined(CONFIG_SMP) # include #else @@ -24,6 +26,9 @@ typedef struct { unsigned int magic, owner_cpu; void *owner; #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif } spinlock_t; #define SPINLOCK_MAGIC 0xdead4ead @@ -37,28 +42,47 @@ typedef struct { unsigned int magic, owner_cpu; void *owner; #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif } rwlock_t; #define RWLOCK_MAGIC 0xdeaf1eed #define SPINLOCK_OWNER_INIT ((void *)-1L) +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define SPIN_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define RW_DEP_MAP_INIT(lockname) +#endif + #ifdef CONFIG_DEBUG_SPINLOCK # define __SPIN_LOCK_UNLOCKED(lockname) \ (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ .magic = SPINLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ - .owner_cpu = -1 } + .owner_cpu = -1, \ + SPIN_DEP_MAP_INIT(lockname) } #define __RW_LOCK_UNLOCKED(lockname) \ (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ .magic = RWLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ - .owner_cpu = -1 } + .owner_cpu = -1, \ + RW_DEP_MAP_INIT(lockname) } #else # define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED } + (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ + SPIN_DEP_MAP_INIT(lockname) } #define __RW_LOCK_UNLOCKED(lockname) \ - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED } + (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ + RW_DEP_MAP_INIT(lockname) } #endif #define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h index 04135b0e198e..27644af20b7c 100644 --- a/include/linux/spinlock_types_up.h +++ b/include/linux/spinlock_types_up.h @@ -12,10 +12,14 @@ * Released under the General Public License (GPL). 
*/ -#ifdef CONFIG_DEBUG_SPINLOCK +#if defined(CONFIG_DEBUG_SPINLOCK) || \ + defined(CONFIG_DEBUG_LOCK_ALLOC) typedef struct { volatile unsigned int slock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif } raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } @@ -30,6 +34,9 @@ typedef struct { } raw_spinlock_t; typedef struct { /* no debug version on UP */ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif } raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { } diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 31accf2f0b13..ea54c4c9a4ec 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -18,7 +18,6 @@ */ #ifdef CONFIG_DEBUG_SPINLOCK - #define __raw_spin_is_locked(x) ((x)->slock == 0) static inline void __raw_spin_lock(raw_spinlock_t *lock) diff --git a/kernel/Makefile b/kernel/Makefile index df6ef3263699..47dbcd570cd8 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o obj-$(CONFIG_SMP) += cpu.o spinlock.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o +obj-$(CONFIG_PROVE_LOCKING) += spinlock.o obj-$(CONFIG_UID16) += uid16.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_KALLSYMS) += kallsyms.o diff --git a/kernel/sched.c b/kernel/sched.c index 911829966534..ae4db0185bb2 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -308,6 +308,13 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) /* this is a valid case when another task releases the spinlock */ rq->lock.owner = current; #endif + /* + * If we are tracking spinlock dependencies then we have to + * fix up the runqueue lock - which gets 'carried over' from + * prev into current: + */ + spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); + spin_unlock_irq(&rq->lock); } @@ -1778,6 +1785,7 @@ task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next) WARN_ON(rq->prev_mm); rq->prev_mm = oldmm; } + spin_release(&rq->lock.dep_map, 1, _THIS_IP_); /* Here we just switch the register state and the stack. 
*/ switch_to(prev, next, prev); @@ -4384,6 +4392,7 @@ asmlinkage long sys_sched_yield(void) * no need to preempt or enable interrupts: */ __release(rq->lock); + spin_release(&rq->lock.dep_map, 1, _THIS_IP_); _raw_spin_unlock(&rq->lock); preempt_enable_no_resched(); @@ -4447,6 +4456,7 @@ int cond_resched_lock(spinlock_t *lock) spin_lock(lock); } if (need_resched() && __resched_legal()) { + spin_release(&lock->dep_map, 1, _THIS_IP_); _raw_spin_unlock(lock); preempt_enable_no_resched(); __cond_resched(); diff --git a/kernel/spinlock.c b/kernel/spinlock.c index b31e54eadf56..bfd6ad9c0330 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c @@ -13,6 +13,7 @@ #include #include #include +#include #include /* @@ -29,8 +30,10 @@ EXPORT_SYMBOL(generic__raw_read_trylock); int __lockfunc _spin_trylock(spinlock_t *lock) { preempt_disable(); - if (_raw_spin_trylock(lock)) + if (_raw_spin_trylock(lock)) { + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); return 1; + } preempt_enable(); return 0; @@ -40,8 +43,10 @@ EXPORT_SYMBOL(_spin_trylock); int __lockfunc _read_trylock(rwlock_t *lock) { preempt_disable(); - if (_raw_read_trylock(lock)) + if (_raw_read_trylock(lock)) { + rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); return 1; + } preempt_enable(); return 0; @@ -51,19 +56,28 @@ EXPORT_SYMBOL(_read_trylock); int __lockfunc _write_trylock(rwlock_t *lock) { preempt_disable(); - if (_raw_write_trylock(lock)) + if (_raw_write_trylock(lock)) { + rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); return 1; + } preempt_enable(); return 0; } EXPORT_SYMBOL(_write_trylock); -#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) +/* + * If lockdep is enabled then we use the non-preemption spin-ops + * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are + * not re-enabled during lock-acquire (which the preempt-spin-ops do): + */ +#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \ + defined(CONFIG_PROVE_LOCKING) void __lockfunc _read_lock(rwlock_t *lock) { preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); _raw_read_lock(lock); } EXPORT_SYMBOL(_read_lock); @@ -74,7 +88,17 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) local_irq_save(flags); preempt_disable(); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); + /* + * On lockdep we dont want the hand-coded irq-enable of + * _raw_spin_lock_flags() code, because lockdep assumes + * that interrupts are not re-enabled during lock-acquire: + */ +#ifdef CONFIG_PROVE_LOCKING + _raw_spin_lock(lock); +#else _raw_spin_lock_flags(lock, &flags); +#endif return flags; } EXPORT_SYMBOL(_spin_lock_irqsave); @@ -83,6 +107,7 @@ void __lockfunc _spin_lock_irq(spinlock_t *lock) { local_irq_disable(); preempt_disable(); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); _raw_spin_lock(lock); } EXPORT_SYMBOL(_spin_lock_irq); @@ -91,6 +116,7 @@ void __lockfunc _spin_lock_bh(spinlock_t *lock) { local_bh_disable(); preempt_disable(); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); _raw_spin_lock(lock); } EXPORT_SYMBOL(_spin_lock_bh); @@ -101,6 +127,7 @@ unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) local_irq_save(flags); preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); _raw_read_lock(lock); return flags; } @@ -110,6 +137,7 @@ void __lockfunc _read_lock_irq(rwlock_t *lock) { local_irq_disable(); preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); _raw_read_lock(lock); } EXPORT_SYMBOL(_read_lock_irq); @@ -118,6 +146,7 @@ void __lockfunc _read_lock_bh(rwlock_t *lock) { 
local_bh_disable(); preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); _raw_read_lock(lock); } EXPORT_SYMBOL(_read_lock_bh); @@ -128,6 +157,7 @@ unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) local_irq_save(flags); preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); _raw_write_lock(lock); return flags; } @@ -137,6 +167,7 @@ void __lockfunc _write_lock_irq(rwlock_t *lock) { local_irq_disable(); preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); _raw_write_lock(lock); } EXPORT_SYMBOL(_write_lock_irq); @@ -145,6 +176,7 @@ void __lockfunc _write_lock_bh(rwlock_t *lock) { local_bh_disable(); preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); _raw_write_lock(lock); } EXPORT_SYMBOL(_write_lock_bh); @@ -152,6 +184,7 @@ EXPORT_SYMBOL(_write_lock_bh); void __lockfunc _spin_lock(spinlock_t *lock) { preempt_disable(); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); _raw_spin_lock(lock); } @@ -160,6 +193,7 @@ EXPORT_SYMBOL(_spin_lock); void __lockfunc _write_lock(rwlock_t *lock) { preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); _raw_write_lock(lock); } @@ -255,8 +289,22 @@ BUILD_LOCK_OPS(write, rwlock); #endif /* CONFIG_PREEMPT */ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) +{ + preempt_disable(); + spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + _raw_spin_lock(lock); +} + +EXPORT_SYMBOL(_spin_lock_nested); + +#endif + void __lockfunc _spin_unlock(spinlock_t *lock) { + spin_release(&lock->dep_map, 1, _RET_IP_); _raw_spin_unlock(lock); preempt_enable(); } @@ -264,6 +312,7 @@ EXPORT_SYMBOL(_spin_unlock); void __lockfunc _write_unlock(rwlock_t *lock) { + rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_write_unlock(lock); preempt_enable(); } @@ -271,6 +320,7 @@ EXPORT_SYMBOL(_write_unlock); void __lockfunc _read_unlock(rwlock_t *lock) { + rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_read_unlock(lock); preempt_enable(); } @@ -278,6 +328,7 @@ EXPORT_SYMBOL(_read_unlock); void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) { + spin_release(&lock->dep_map, 1, _RET_IP_); _raw_spin_unlock(lock); local_irq_restore(flags); preempt_enable(); @@ -286,6 +337,7 @@ EXPORT_SYMBOL(_spin_unlock_irqrestore); void __lockfunc _spin_unlock_irq(spinlock_t *lock) { + spin_release(&lock->dep_map, 1, _RET_IP_); _raw_spin_unlock(lock); local_irq_enable(); preempt_enable(); @@ -294,14 +346,16 @@ EXPORT_SYMBOL(_spin_unlock_irq); void __lockfunc _spin_unlock_bh(spinlock_t *lock) { + spin_release(&lock->dep_map, 1, _RET_IP_); _raw_spin_unlock(lock); preempt_enable_no_resched(); - local_bh_enable(); + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); } EXPORT_SYMBOL(_spin_unlock_bh); void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { + rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_read_unlock(lock); local_irq_restore(flags); preempt_enable(); @@ -310,6 +364,7 @@ EXPORT_SYMBOL(_read_unlock_irqrestore); void __lockfunc _read_unlock_irq(rwlock_t *lock) { + rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_read_unlock(lock); local_irq_enable(); preempt_enable(); @@ -318,14 +373,16 @@ EXPORT_SYMBOL(_read_unlock_irq); void __lockfunc _read_unlock_bh(rwlock_t *lock) { + rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_read_unlock(lock); preempt_enable_no_resched(); - local_bh_enable(); + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); } EXPORT_SYMBOL(_read_unlock_bh); void 
__lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) { + rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_write_unlock(lock); local_irq_restore(flags); preempt_enable(); @@ -334,6 +391,7 @@ EXPORT_SYMBOL(_write_unlock_irqrestore); void __lockfunc _write_unlock_irq(rwlock_t *lock) { + rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_write_unlock(lock); local_irq_enable(); preempt_enable(); @@ -342,9 +400,10 @@ EXPORT_SYMBOL(_write_unlock_irq); void __lockfunc _write_unlock_bh(rwlock_t *lock) { + rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_write_unlock(lock); preempt_enable_no_resched(); - local_bh_enable(); + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); } EXPORT_SYMBOL(_write_unlock_bh); @@ -352,11 +411,13 @@ int __lockfunc _spin_trylock_bh(spinlock_t *lock) { local_bh_disable(); preempt_disable(); - if (_raw_spin_trylock(lock)) + if (_raw_spin_trylock(lock)) { + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); return 1; + } preempt_enable_no_resched(); - local_bh_enable(); + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); return 0; } EXPORT_SYMBOL(_spin_trylock_bh); diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c index e713e86811ae..e0fdfddb406e 100644 --- a/lib/kernel_lock.c +++ b/lib/kernel_lock.c @@ -177,7 +177,12 @@ static inline void __lock_kernel(void) static inline void __unlock_kernel(void) { - spin_unlock(&kernel_flag); + /* + * the BKL is not covered by lockdep, so we open-code the + * unlocking sequence (and thus avoid the dep-chain ops): + */ + _raw_spin_unlock(&kernel_flag); + preempt_enable(); } /* diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 3de2ccf48ac6..3d9c4dc965ed 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -12,6 +12,42 @@ #include #include +void __spin_lock_init(spinlock_t *lock, const char *name, + struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)lock, sizeof(*lock)); + lockdep_init_map(&lock->dep_map, name, key); +#endif + lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + lock->magic = SPINLOCK_MAGIC; + lock->owner = SPINLOCK_OWNER_INIT; + lock->owner_cpu = -1; +} + +EXPORT_SYMBOL(__spin_lock_init); + +void __rwlock_init(rwlock_t *lock, const char *name, + struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)lock, sizeof(*lock)); + lockdep_init_map(&lock->dep_map, name, key); +#endif + lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED; + lock->magic = RWLOCK_MAGIC; + lock->owner = SPINLOCK_OWNER_INIT; + lock->owner_cpu = -1; +} + +EXPORT_SYMBOL(__rwlock_init); + static void spin_bug(spinlock_t *lock, const char *msg) { struct task_struct *owner = NULL; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index da44fabf4dc5..283a72247e5e 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -205,7 +205,8 @@ __u8 ip_tos2prio[16] = { struct rt_hash_bucket { struct rtable *chain; }; -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \ + defined(CONFIG_PROVE_LOCKING) /* * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks * The size of this table is a power of two and depends on the number of CPUS. 
-- cgit v1.2.3 From ef5d4707b9065c0cf8a69fa3716893f3b75201ba Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:24:55 -0700 Subject: [PATCH] lockdep: prove mutex locking correctness Use the lock validator framework to prove mutex locking correctness. Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mutex-debug.h | 8 +++++++- include/linux/mutex.h | 31 ++++++++++++++++++++++++++++--- kernel/mutex-debug.c | 6 +++++- kernel/mutex.c | 28 ++++++++++++++++++++++------ kernel/mutex.h | 2 +- 5 files changed, 63 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h index 70a26091fc73..2537285e1064 100644 --- a/include/linux/mutex-debug.h +++ b/include/linux/mutex-debug.h @@ -2,6 +2,7 @@ #define __LINUX_MUTEX_DEBUG_H #include +#include /* * Mutexes - debugging helpers: @@ -10,7 +11,12 @@ #define __DEBUG_MUTEX_INITIALIZER(lockname) \ , .magic = &lockname -#define mutex_init(sem) __mutex_init(sem, __FILE__":"#sem) +#define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key); \ +} while (0) extern void FASTCALL(mutex_destroy(struct mutex *lock)); diff --git a/include/linux/mutex.h b/include/linux/mutex.h index caafecd5e366..27c48daa3183 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -13,6 +13,7 @@ #include #include #include +#include #include @@ -53,6 +54,9 @@ struct mutex { const char *name; void *magic; #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif }; /* @@ -72,20 +76,34 @@ struct mutex_waiter { # include #else # define __DEBUG_MUTEX_INITIALIZER(lockname) -# define mutex_init(mutex) __mutex_init(mutex, NULL) +# define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key); \ +} while (0) # define mutex_destroy(mutex) do { } while (0) #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ + , .dep_map = { .name = #lockname } +#else +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +#endif + #define __MUTEX_INITIALIZER(lockname) \ { .count = ATOMIC_INIT(1) \ , .wait_lock = SPIN_LOCK_UNLOCKED \ , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ - __DEBUG_MUTEX_INITIALIZER(lockname) } + __DEBUG_MUTEX_INITIALIZER(lockname) \ + __DEP_MAP_MUTEX_INITIALIZER(lockname) } #define DEFINE_MUTEX(mutexname) \ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) -extern void fastcall __mutex_init(struct mutex *lock, const char *name); +extern void __mutex_init(struct mutex *lock, const char *name, + struct lock_class_key *key); /*** * mutex_is_locked - is the mutex locked @@ -104,6 +122,13 @@ static inline int fastcall mutex_is_locked(struct mutex *lock) */ extern void fastcall mutex_lock(struct mutex *lock); extern int fastcall mutex_lock_interruptible(struct mutex *lock); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); +#else +# define mutex_lock_nested(lock, subclass) mutex_lock(lock) +#endif + /* * NOTE: mutex_trylock() follows the spin_trylock() convention, * not the down_trylock() convention! 
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c index 5569766a1ea2..e3203c654dda 100644 --- a/kernel/mutex-debug.c +++ b/kernel/mutex-debug.c @@ -83,12 +83,16 @@ void debug_mutex_unlock(struct mutex *lock) DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info()); } -void debug_mutex_init(struct mutex *lock, const char *name) +void debug_mutex_init(struct mutex *lock, const char *name, + struct lock_class_key *key) { +#ifdef CONFIG_DEBUG_LOCK_ALLOC /* * Make sure we are not reinitializing a held lock: */ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); + lockdep_init_map(&lock->dep_map, name, key); +#endif lock->owner = NULL; lock->magic = lock; } diff --git a/kernel/mutex.c b/kernel/mutex.c index 43a50c18701a..8c71cf72a497 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -39,13 +39,14 @@ * * It is not allowed to initialize an already locked mutex. */ -__always_inline void fastcall __mutex_init(struct mutex *lock, const char *name) +void +__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) { atomic_set(&lock->count, 1); spin_lock_init(&lock->wait_lock); INIT_LIST_HEAD(&lock->wait_list); - debug_mutex_init(lock, name); + debug_mutex_init(lock, name, key); } EXPORT_SYMBOL(__mutex_init); @@ -131,6 +132,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass) spin_lock_mutex(&lock->wait_lock, flags); debug_mutex_lock_common(lock, &waiter); + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); debug_mutex_add_waiter(lock, &waiter, task->thread_info); /* add waiting tasks to the end of the waitqueue (FIFO): */ @@ -158,6 +160,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass) if (unlikely(state == TASK_INTERRUPTIBLE && signal_pending(task))) { mutex_remove_waiter(lock, &waiter, task->thread_info); + mutex_release(&lock->dep_map, 1, _RET_IP_); spin_unlock_mutex(&lock->wait_lock, flags); debug_mutex_free_waiter(&waiter); @@ -194,16 +197,28 @@ __mutex_lock_slowpath(atomic_t *lock_count) __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0); } +#ifdef CONFIG_DEBUG_LOCK_ALLOC +void __sched +mutex_lock_nested(struct mutex *lock, unsigned int subclass) +{ + might_sleep(); + __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass); +} + +EXPORT_SYMBOL_GPL(mutex_lock_nested); +#endif + /* * Release the lock, slowpath: */ static fastcall inline void -__mutex_unlock_common_slowpath(atomic_t *lock_count) +__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested) { struct mutex *lock = container_of(lock_count, struct mutex, count); unsigned long flags; spin_lock_mutex(&lock->wait_lock, flags); + mutex_release(&lock->dep_map, nested, _RET_IP_); debug_mutex_unlock(lock); /* @@ -236,7 +251,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count) static fastcall noinline void __mutex_unlock_slowpath(atomic_t *lock_count) { - __mutex_unlock_common_slowpath(lock_count); + __mutex_unlock_common_slowpath(lock_count, 1); } /* @@ -287,9 +302,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count) spin_lock_mutex(&lock->wait_lock, flags); prev = atomic_xchg(&lock->count, -1); - if (likely(prev == 1)) + if (likely(prev == 1)) { debug_mutex_set_owner(lock, current_thread_info()); - + mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); + } /* Set it back to 0 if there are no waiters: */ if (likely(list_empty(&lock->wait_list))) atomic_set(&lock->count, 0); diff --git a/kernel/mutex.h b/kernel/mutex.h index aeb2d916aa0e..a075dafbb290 100644 --- a/kernel/mutex.h +++ b/kernel/mutex.h @@ -22,7 +22,7 @@ #define 
debug_mutex_free_waiter(waiter) do { } while (0) #define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0) #define debug_mutex_unlock(lock) do { } while (0) -#define debug_mutex_init(lock, name) do { } while (0) +#define debug_mutex_init(lock, name, key) do { } while (0) static inline void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter) -- cgit v1.2.3 From a90b9c05df3c1e58eaedc28795d0f5abd896c098 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:04 -0700 Subject: [PATCH] lockdep: annotate dcache Teach special (recursive) locking code to the lock validator. Has no effect on non-lockdep kernels. Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dcache.c | 4 ++-- include/linux/dcache.h | 12 ++++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/fs/dcache.c b/fs/dcache.c index bec4de176c81..1b4a3a34ec57 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1339,10 +1339,10 @@ void d_move(struct dentry * dentry, struct dentry * target) */ if (target < dentry) { spin_lock(&target->d_lock); - spin_lock(&dentry->d_lock); + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); } else { spin_lock(&dentry->d_lock); - spin_lock(&target->d_lock); + spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED); } /* Move the dentry to the target hash queue, if on different bucket */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 0dd1610a94a9..471781ffeab1 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -114,6 +114,18 @@ struct dentry { unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */ }; +/* + * dentry->d_lock spinlock nesting subclasses: + * + * 0: normal + * 1: nested + */ +enum dentry_d_lock_class +{ + DENTRY_D_LOCK_NORMAL, /* implicitly used by plain spin_lock() APIs. */ + DENTRY_D_LOCK_NESTED +}; + struct dentry_operations { int (*d_revalidate)(struct dentry *, struct nameidata *); int (*d_hash) (struct dentry *, struct qstr *); -- cgit v1.2.3 From f2eace23e924bd3f05aedea4fc505eb5508d2d93 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:05 -0700 Subject: [PATCH] lockdep: annotate i_mutex Teach special (recursive) locking code to the lock validator. Has no effect on non-lockdep kernels. 
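For illustration (the helper name below is made up; mutex_lock_nested() and the i_mutex subclasses are what this series adds): an operation that takes a parent directory's i_mutex and then a child inode's i_mutex holds two locks of the same class, so the acquisitions are annotated with distinct subclasses to tell the validator the nesting is intentional, exactly as lock_rename() and the rmdir/unlink paths are converted below:

	#include <linux/fs.h>
	#include <linux/mutex.h>

	static void example_update_parent_and_child(struct inode *dir,
						    struct inode *inode)
	{
		mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);

		/* ... modify both inodes under their locks ... */

		mutex_unlock(&inode->i_mutex);
		mutex_unlock(&dir->i_mutex);
	}
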
Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/usb/core/inode.c | 2 +- fs/namei.c | 20 ++++++++++---------- include/linux/fs.h | 19 +++++++++++++++++++ 3 files changed, 30 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c index e47e3a8ed6e4..cf11196e012d 100644 --- a/drivers/usb/core/inode.c +++ b/drivers/usb/core/inode.c @@ -200,7 +200,7 @@ static void update_sb(struct super_block *sb) if (!root) return; - mutex_lock(&root->d_inode->i_mutex); + mutex_lock_nested(&root->d_inode->i_mutex, I_MUTEX_PARENT); list_for_each_entry(bus, &root->d_subdirs, d_u.d_child) { if (bus->d_inode) { diff --git a/fs/namei.c b/fs/namei.c index c784e8bb57a3..c9750d755aff 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1423,7 +1423,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2) struct dentry *p; if (p1 == p2) { - mutex_lock(&p1->d_inode->i_mutex); + mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT); return NULL; } @@ -1431,22 +1431,22 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2) for (p = p1; p->d_parent != p; p = p->d_parent) { if (p->d_parent == p2) { - mutex_lock(&p2->d_inode->i_mutex); - mutex_lock(&p1->d_inode->i_mutex); + mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT); + mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD); return p; } } for (p = p2; p->d_parent != p; p = p->d_parent) { if (p->d_parent == p1) { - mutex_lock(&p1->d_inode->i_mutex); - mutex_lock(&p2->d_inode->i_mutex); + mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT); + mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD); return p; } } - mutex_lock(&p1->d_inode->i_mutex); - mutex_lock(&p2->d_inode->i_mutex); + mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT); + mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD); return NULL; } @@ -1751,7 +1751,7 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir) { struct dentry *dentry = ERR_PTR(-EEXIST); - mutex_lock(&nd->dentry->d_inode->i_mutex); + mutex_lock_nested(&nd->dentry->d_inode->i_mutex, I_MUTEX_PARENT); /* * Yucky last component or no last component at all? 
* (foo/., foo/.., /////) @@ -2008,7 +2008,7 @@ static long do_rmdir(int dfd, const char __user *pathname) error = -EBUSY; goto exit1; } - mutex_lock(&nd.dentry->d_inode->i_mutex); + mutex_lock_nested(&nd.dentry->d_inode->i_mutex, I_MUTEX_PARENT); dentry = lookup_hash(&nd); error = PTR_ERR(dentry); if (!IS_ERR(dentry)) { @@ -2082,7 +2082,7 @@ static long do_unlinkat(int dfd, const char __user *pathname) error = -EISDIR; if (nd.last_type != LAST_NORM) goto exit1; - mutex_lock(&nd.dentry->d_inode->i_mutex); + mutex_lock_nested(&nd.dentry->d_inode->i_mutex, I_MUTEX_PARENT); dentry = lookup_hash(&nd); error = PTR_ERR(dentry); if (!IS_ERR(dentry)) { diff --git a/include/linux/fs.h b/include/linux/fs.h index e04a5cfe874f..05ded9e76b23 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -542,6 +542,25 @@ struct inode { #endif }; +/* + * inode->i_mutex nesting subclasses for the lock validator: + * + * 0: the object of the current VFS operation + * 1: parent + * 2: child/target + * 3: quota file + * + * The locking order between these classes is + * parent -> child -> normal -> quota + */ +enum inode_i_mutex_lock_class +{ + I_MUTEX_NORMAL, + I_MUTEX_PARENT, + I_MUTEX_CHILD, + I_MUTEX_QUOTA +}; + /* * NOTE: in a 32bit arch with a preemptable kernel and * an UP compile the i_size_read/write must be atomic -- cgit v1.2.3 From 243c7621aac4ed1aa79524c9a1cecf7c05a28124 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:06 -0700 Subject: [PATCH] lockdep: annotate genirq Teach special (recursive) locking code to the lock validator. Has no effect on non-lockdep kernels. Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/lockdep.h | 6 ++++++ init/main.c | 1 + kernel/irq/handle.c | 16 ++++++++++++++++ 3 files changed, 23 insertions(+) (limited to 'include/linux') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 80ec7a4dbc98..316e0fb8d7b1 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -270,6 +270,12 @@ static inline int lockdep_internal(void) struct lock_class_key { }; #endif /* !LOCKDEP */ +#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS) +extern void early_init_irq_lock_class(void); +#else +# define early_init_irq_lock_class() do { } while (0) +#endif + #ifdef CONFIG_TRACE_IRQFLAGS extern void early_boot_irqs_off(void); extern void early_boot_irqs_on(void); diff --git a/init/main.c b/init/main.c index fc473d4b56fd..628b8e9e841a 100644 --- a/init/main.c +++ b/init/main.c @@ -466,6 +466,7 @@ asmlinkage void __init start_kernel(void) local_irq_disable(); early_boot_irqs_off(); + early_init_irq_lock_class(); /* * Interrupts are still disabled. Do necessary setups, then diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index aeb6e391276c..a7b497ee919e 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -249,3 +249,19 @@ out: return 1; } +#ifdef CONFIG_TRACE_IRQFLAGS + +/* + * lockdep: we want to handle all irq_desc locks as a single lock-class: + */ +static struct lock_class_key irq_desc_lock_class; + +void early_init_irq_lock_class(void) +{ + int i; + + for (i = 0; i < NR_IRQS; i++) + lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class); +} + +#endif -- cgit v1.2.3 From eb4542b98c81e22e08587b747b21986a45360999 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:07 -0700 Subject: [PATCH] lockdep: annotate waitqueues Create one lock class for all waitqueue locks in the kernel. 
Has no effect on non-lockdep kernels. Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/wait.h | 6 ++++++ kernel/wait.c | 4 ++++ 2 files changed, 10 insertions(+) (limited to 'include/linux') diff --git a/include/linux/wait.h b/include/linux/wait.h index bc4f389c49bb..794be7af58ae 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -77,9 +77,15 @@ struct task_struct; #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ { .flags = word, .bit_nr = bit, } +/* + * lockdep: we want one lock-class for all waitqueue locks. + */ +extern struct lock_class_key waitqueue_lock_key; + static inline void init_waitqueue_head(wait_queue_head_t *q) { spin_lock_init(&q->lock); + lockdep_set_class(&q->lock, &waitqueue_lock_key); INIT_LIST_HEAD(&q->task_list); } diff --git a/kernel/wait.c b/kernel/wait.c index 5985d866531f..a1d57aeb7f75 100644 --- a/kernel/wait.c +++ b/kernel/wait.c @@ -10,6 +10,10 @@ #include #include +struct lock_class_key waitqueue_lock_key; + +EXPORT_SYMBOL(waitqueue_lock_key); + void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) { unsigned long flags; -- cgit v1.2.3 From 06825ba3553151eea24206bc53d4fc3de49e0ab1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:09 -0700 Subject: [PATCH] lockdep: annotate skb_queue_head_init Teach special (multi-initialized) locking code to the lock validator. Has no effect on non-lockdep kernels. Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Cc: "David S. Miller" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/skbuff.h | 3 +++ net/core/skbuff.c | 7 +++++++ 2 files changed, 10 insertions(+) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 57d7d4965f9a..3597b4f14389 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -604,9 +604,12 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_) return list_->qlen; } +extern struct lock_class_key skb_queue_lock_key; + static inline void skb_queue_head_init(struct sk_buff_head *list) { spin_lock_init(&list->lock); + lockdep_set_class(&list->lock, &skb_queue_lock_key); list->prev = list->next = (struct sk_buff *)list; list->qlen = 0; } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 7cfbdb215ba2..44f6a181a754 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -70,6 +70,13 @@ static kmem_cache_t *skbuff_head_cache __read_mostly; static kmem_cache_t *skbuff_fclone_cache __read_mostly; +/* + * lockdep: lock class key used by skb_queue_head_init(): + */ +struct lock_class_key skb_queue_lock_key; + +EXPORT_SYMBOL(skb_queue_lock_key); + /* * Keep out-of-line to prevent kernel bloat. * __builtin_return_address is not used because it is not always -- cgit v1.2.3 From 543655244866b8ec648fea1eb9c32a35ffba5721 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:11 -0700 Subject: [PATCH] lockdep: annotate hrtimer base locks Teach special (recursive) locking code to the lock validator. Has no effect on non-lockdep kernels. 
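The pattern used here, sketched below with made-up names purely for illustration, is to embed a struct lock_class_key in the object that owns the spinlock and to register it with lockdep_set_class() right after spin_lock_init(), so every instance initialized this way gets its own lock class:

	struct example_base {			/* hypothetical structure */
		spinlock_t		lock;
		struct lock_class_key	lock_key;
	};

	static void example_base_init(struct example_base *base)
	{
		spin_lock_init(&base->lock);
		/* key the lock to this instance's embedded class key */
		lockdep_set_class(&base->lock, &base->lock_key);
	}

On non-lockdep kernels lockdep_set_class() compiles away, which is why the change has no effect there.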
Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hrtimer.h | 1 + kernel/hrtimer.c | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 07d7305f131e..e4bccbcc2750 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -91,6 +91,7 @@ struct hrtimer_base { ktime_t (*get_softirq_time)(void); struct hrtimer *curr_timer; ktime_t softirq_time; + struct lock_class_key lock_key; }; /* diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 8d3dc29ef41a..617304ce67db 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -782,8 +782,10 @@ static void __devinit init_hrtimers_cpu(int cpu) struct hrtimer_base *base = per_cpu(hrtimer_bases, cpu); int i; - for (i = 0; i < MAX_HRTIMER_BASES; i++, base++) + for (i = 0; i < MAX_HRTIMER_BASES; i++, base++) { spin_lock_init(&base->lock); + lockdep_set_class(&base->lock, &base->lock_key); + } } #ifdef CONFIG_HOTPLUG_CPU -- cgit v1.2.3 From 366c7f554e888e51b8395f9b07b273fe775c7ff3 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:25 -0700 Subject: [PATCH] lockdep: annotate enable_in_hardirq() Make use of local_irq_enable_in_hardirq() API to annotate places that enable hardirqs in hardirq context. Has no effect on non-lockdep kernels. Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/i386/kernel/nmi.c | 2 +- arch/x86_64/kernel/nmi.c | 2 +- drivers/ide/ide-floppy.c | 2 +- drivers/ide/ide-io.c | 6 +++--- drivers/ide/ide-taskfile.c | 2 +- include/linux/ide.h | 2 +- kernel/irq/handle.c | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c index a76e93146585..2dd928a84645 100644 --- a/arch/i386/kernel/nmi.c +++ b/arch/i386/kernel/nmi.c @@ -107,7 +107,7 @@ int nmi_active; static __init void nmi_cpu_busy(void *data) { volatile int *endflag = data; - local_irq_enable(); + local_irq_enable_in_hardirq(); /* Intentionally don't use cpu_relax here. This is to make sure that the performance counter really ticks, even if there is a simulator or similar that catches the diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c index 476c1472fc07..5baa0c726e97 100644 --- a/arch/x86_64/kernel/nmi.c +++ b/arch/x86_64/kernel/nmi.c @@ -127,7 +127,7 @@ void __cpuinit nmi_watchdog_default(void) static __init void nmi_cpu_busy(void *data) { volatile int *endflag = data; - local_irq_enable(); + local_irq_enable_in_hardirq(); /* Intentionally don't use cpu_relax here. 
This is to make sure that the performance counter really ticks, even if there is a simulator or similar that catches the diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 6ca3476d02c7..adbe9f76a505 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -838,7 +838,7 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive) "transferred\n", pc->actually_transferred); clear_bit(PC_DMA_IN_PROGRESS, &pc->flags); - local_irq_enable(); + local_irq_enable_in_hardirq(); if (status.b.check || test_bit(PC_DMA_ERROR, &pc->flags)) { /* Error detected */ diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 7dba9992ad30..d6bf1ec96322 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -693,7 +693,7 @@ static ide_startstop_t drive_cmd_intr (ide_drive_t *drive) u8 stat = hwif->INB(IDE_STATUS_REG); int retries = 10; - local_irq_enable(); + local_irq_enable_in_hardirq(); if ((stat & DRQ_STAT) && args && args[3]) { u8 io_32bit = drive->io_32bit; drive->io_32bit = 0; @@ -1286,7 +1286,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq) if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq) disable_irq_nosync(hwif->irq); spin_unlock(&ide_lock); - local_irq_enable(); + local_irq_enable_in_hardirq(); /* allow other IRQs while we start this request */ startstop = start_request(drive, rq); spin_lock_irq(&ide_lock); @@ -1631,7 +1631,7 @@ irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs) spin_unlock(&ide_lock); if (drive->unmask) - local_irq_enable(); + local_irq_enable_in_hardirq(); /* service this interrupt, may set handler for next interrupt */ startstop = handler(drive); spin_lock_irq(&ide_lock); diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index 04547eb0833f..97a9244312fc 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -222,7 +222,7 @@ ide_startstop_t task_no_data_intr (ide_drive_t *drive) ide_hwif_t *hwif = HWIF(drive); u8 stat; - local_irq_enable(); + local_irq_enable_in_hardirq(); if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) { return ide_error(drive, "task_no_data_intr", stat); /* calls ide_end_drive_cmd */ diff --git a/include/linux/ide.h b/include/linux/ide.h index 285316c836b5..dc7abef10965 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -1359,7 +1359,7 @@ extern struct semaphore ide_cfg_sem; * ide_drive_t->hwif: constant, no locking */ -#define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable(); } while (0) +#define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable_in_hardirq(); } while (0) extern struct bus_type ide_bus_type; diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index a7b497ee919e..fc4e906aedbd 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -132,7 +132,7 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs, handle_dynamic_tick(action); if (!(action->flags & IRQF_DISABLED)) - local_irq_enable(); + local_irq_enable_in_hardirq(); do { ret = action->handler(irq, action->dev_id, regs); -- cgit v1.2.3 From cf51624999e56c88154b5f7d451a265db6aabff7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:27 -0700 Subject: [PATCH] lockdep: annotate ->s_lock Teach special (per-filesystem) locking code to the lock validator. Minimal effect on non-lockdep kernels: one extra parameter to alloc_super(). 
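In effect each file_system_type carries its own class key, so the s_lock of superblocks belonging to different filesystems ends up in different lockdep classes. A hedged sketch of why that matters (function and variable names below are invented, and the scenario is purely illustrative): nesting the s_lock of two different filesystem types is then recorded as an ordinary cross-class dependency rather than as a recursive acquisition of a single class:

	/* Illustrative only; assumes two superblocks of different fs types. */
	static void nest_two_supers(struct super_block *ext3_sb,
				    struct super_block *usbfs_sb)
	{
		lock_super(ext3_sb);	/* class keyed to ext3's file_system_type  */
		lock_super(usbfs_sb);	/* different key, so no false self-deadlock */
		unlock_super(usbfs_sb);
		unlock_super(ext3_sb);
	}
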
Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/super.c | 10 ++++++++-- include/linux/fs.h | 1 + 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/fs/super.c b/fs/super.c index 9b780c42d845..5a4fe8be462a 100644 --- a/fs/super.c +++ b/fs/super.c @@ -53,7 +53,7 @@ DEFINE_SPINLOCK(sb_lock); * Allocates and initializes a new &struct super_block. alloc_super() * returns a pointer new superblock or %NULL if allocation had failed. */ -static struct super_block *alloc_super(void) +static struct super_block *alloc_super(struct file_system_type *type) { struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER); static struct super_operations default_op; @@ -72,6 +72,12 @@ static struct super_block *alloc_super(void) INIT_LIST_HEAD(&s->s_inodes); init_rwsem(&s->s_umount); mutex_init(&s->s_lock); + /* + * The locking rules for s_lock are up to the + * filesystem. For example ext3fs has different + * lock ordering than usbfs: + */ + lockdep_set_class(&s->s_lock, &type->s_lock_key); down_write(&s->s_umount); s->s_count = S_BIAS; atomic_set(&s->s_active, 1); @@ -295,7 +301,7 @@ retry: } if (!s) { spin_unlock(&sb_lock); - s = alloc_super(); + s = alloc_super(type); if (!s) return ERR_PTR(-ENOMEM); goto retry; diff --git a/include/linux/fs.h b/include/linux/fs.h index 05ded9e76b23..0a3ea52d711e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1295,6 +1295,7 @@ struct file_system_type { struct module *owner; struct file_system_type * next; struct list_head fs_supers; + struct lock_class_key s_lock_key; }; extern int get_sb_bdev(struct file_system_type *fs_type, -- cgit v1.2.3 From 897c6ff9568bcb102ffc6b465ebe1def0cba829d Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 3 Jul 2006 00:25:28 -0700 Subject: [PATCH] lockdep: annotate sb ->s_umount The s_umount rwsem needs to be classified as per-superblock since it's perfectly legit to keep multiple of those recursively in the VFS locking rules. Has no effect on non-lockdep kernels. Signed-off-by: Arjan van de Ven Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/super.c | 1 + include/linux/fs.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include/linux') diff --git a/fs/super.c b/fs/super.c index 5a4fe8be462a..6d4e8174b6db 100644 --- a/fs/super.c +++ b/fs/super.c @@ -72,6 +72,7 @@ static struct super_block *alloc_super(struct file_system_type *type) INIT_LIST_HEAD(&s->s_inodes); init_rwsem(&s->s_umount); mutex_init(&s->s_lock); + lockdep_set_class(&s->s_umount, &type->s_umount_key); /* * The locking rules for s_lock are up to the * filesystem. For example ext3fs has different diff --git a/include/linux/fs.h b/include/linux/fs.h index 0a3ea52d711e..e26de68059af 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1296,6 +1296,7 @@ struct file_system_type { struct file_system_type * next; struct list_head fs_supers; struct lock_class_key s_lock_key; + struct lock_class_key s_umount_key; }; extern int get_sb_bdev(struct file_system_type *fs_type, -- cgit v1.2.3 From 663d440eaa496db903cc58be04b9b602ba45e43b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:33 -0700 Subject: [PATCH] lockdep: annotate blkdev nesting Teach special (recursive) locking code to the lock validator. 
Effects on non-lockdep kernels: - the introduction of the following function variants: extern struct block_device *open_partition_by_devnum(dev_t, unsigned); extern int blkdev_put_partition(struct block_device *); static int blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags); which on non-lockdep are the same as open_by_devnum(), blkdev_put() and blkdev_get(). - a subclass parameter to do_open(). [unused on non-lockdep] - a subclass parameter to __blkdev_put(), which is a new internal function for the main blkdev_put*() functions. [parameter unused on non-lockdep kernels, except for two sanity check WARN_ON()s] these functions carry no semantical difference - they only express object dependencies towards the lockdep subsystem. Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Cc: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 6 ++-- fs/block_dev.c | 102 +++++++++++++++++++++++++++++++++++++++++++++-------- include/linux/fs.h | 17 +++++++++ 3 files changed, 107 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/md/md.c b/drivers/md/md.c index 2fe32c261922..e4e161372a3e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1404,7 +1404,7 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev) struct block_device *bdev; char b[BDEVNAME_SIZE]; - bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE); + bdev = open_partition_by_devnum(dev, FMODE_READ|FMODE_WRITE); if (IS_ERR(bdev)) { printk(KERN_ERR "md: could not open %s.\n", __bdevname(dev, b)); @@ -1414,7 +1414,7 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev) if (err) { printk(KERN_ERR "md: could not bd_claim %s.\n", bdevname(bdev, b)); - blkdev_put(bdev); + blkdev_put_partition(bdev); return err; } rdev->bdev = bdev; @@ -1428,7 +1428,7 @@ static void unlock_rdev(mdk_rdev_t *rdev) if (!bdev) MD_BUG(); bd_release(bdev); - blkdev_put(bdev); + blkdev_put_partition(bdev); } void md_autodetect_dev(dev_t dev); diff --git a/fs/block_dev.c b/fs/block_dev.c index 9633a490dab0..37534573960b 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -739,7 +739,7 @@ static int bd_claim_by_kobject(struct block_device *bdev, void *holder, if (!bo) return -ENOMEM; - mutex_lock(&bdev->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, BD_MUTEX_PARTITION); res = bd_claim(bdev, holder); if (res || !add_bd_holder(bdev, bo)) free_bd_holder(bo); @@ -764,7 +764,7 @@ static void bd_release_from_kobject(struct block_device *bdev, if (!kobj) return; - mutex_lock(&bdev->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, BD_MUTEX_PARTITION); bd_release(bdev); if ((bo = del_bd_holder(bdev, kobj))) free_bd_holder(bo); @@ -822,6 +822,22 @@ struct block_device *open_by_devnum(dev_t dev, unsigned mode) EXPORT_SYMBOL(open_by_devnum); +static int +blkdev_get_partition(struct block_device *bdev, mode_t mode, unsigned flags); + +struct block_device *open_partition_by_devnum(dev_t dev, unsigned mode) +{ + struct block_device *bdev = bdget(dev); + int err = -ENOMEM; + int flags = mode & FMODE_WRITE ? O_RDWR : O_RDONLY; + if (bdev) + err = blkdev_get_partition(bdev, mode, flags); + return err ? ERR_PTR(err) : bdev; +} + +EXPORT_SYMBOL(open_partition_by_devnum); + + /* * This routine checks whether a removable media has been changed, * and invalidates all buffer-cache-entries in that case. 
This @@ -868,7 +884,11 @@ void bd_set_size(struct block_device *bdev, loff_t size) } EXPORT_SYMBOL(bd_set_size); -static int do_open(struct block_device *bdev, struct file *file) +static int +blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags); + +static int +do_open(struct block_device *bdev, struct file *file, unsigned int subclass) { struct module *owner = NULL; struct gendisk *disk; @@ -885,7 +905,8 @@ static int do_open(struct block_device *bdev, struct file *file) } owner = disk->fops->owner; - mutex_lock(&bdev->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, subclass); + if (!bdev->bd_openers) { bdev->bd_disk = disk; bdev->bd_contains = bdev; @@ -912,11 +933,11 @@ static int do_open(struct block_device *bdev, struct file *file) ret = -ENOMEM; if (!whole) goto out_first; - ret = blkdev_get(whole, file->f_mode, file->f_flags); + ret = blkdev_get_whole(whole, file->f_mode, file->f_flags); if (ret) goto out_first; bdev->bd_contains = whole; - mutex_lock(&whole->bd_mutex); + mutex_lock_nested(&whole->bd_mutex, BD_MUTEX_WHOLE); whole->bd_part_count++; p = disk->part[part - 1]; bdev->bd_inode->i_data.backing_dev_info = @@ -944,7 +965,8 @@ static int do_open(struct block_device *bdev, struct file *file) if (bdev->bd_invalidated) rescan_partitions(bdev->bd_disk, bdev); } else { - mutex_lock(&bdev->bd_contains->bd_mutex); + mutex_lock_nested(&bdev->bd_contains->bd_mutex, + BD_MUTEX_PARTITION); bdev->bd_contains->bd_part_count++; mutex_unlock(&bdev->bd_contains->bd_mutex); } @@ -985,11 +1007,49 @@ int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags) fake_file.f_dentry = &fake_dentry; fake_dentry.d_inode = bdev->bd_inode; - return do_open(bdev, &fake_file); + return do_open(bdev, &fake_file, BD_MUTEX_NORMAL); } EXPORT_SYMBOL(blkdev_get); +static int +blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags) +{ + /* + * This crockload is due to bad choice of ->open() type. + * It will go away. + * For now, block device ->open() routine must _not_ + * examine anything in 'inode' argument except ->i_rdev. + */ + struct file fake_file = {}; + struct dentry fake_dentry = {}; + fake_file.f_mode = mode; + fake_file.f_flags = flags; + fake_file.f_dentry = &fake_dentry; + fake_dentry.d_inode = bdev->bd_inode; + + return do_open(bdev, &fake_file, BD_MUTEX_WHOLE); +} + +static int +blkdev_get_partition(struct block_device *bdev, mode_t mode, unsigned flags) +{ + /* + * This crockload is due to bad choice of ->open() type. + * It will go away. + * For now, block device ->open() routine must _not_ + * examine anything in 'inode' argument except ->i_rdev. 
+ */ + struct file fake_file = {}; + struct dentry fake_dentry = {}; + fake_file.f_mode = mode; + fake_file.f_flags = flags; + fake_file.f_dentry = &fake_dentry; + fake_dentry.d_inode = bdev->bd_inode; + + return do_open(bdev, &fake_file, BD_MUTEX_PARTITION); +} + static int blkdev_open(struct inode * inode, struct file * filp) { struct block_device *bdev; @@ -1005,7 +1065,7 @@ static int blkdev_open(struct inode * inode, struct file * filp) bdev = bd_acquire(inode); - res = do_open(bdev, filp); + res = do_open(bdev, filp, BD_MUTEX_NORMAL); if (res) return res; @@ -1019,13 +1079,13 @@ static int blkdev_open(struct inode * inode, struct file * filp) return res; } -int blkdev_put(struct block_device *bdev) +static int __blkdev_put(struct block_device *bdev, unsigned int subclass) { int ret = 0; struct inode *bd_inode = bdev->bd_inode; struct gendisk *disk = bdev->bd_disk; - mutex_lock(&bdev->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, subclass); lock_kernel(); if (!--bdev->bd_openers) { sync_blockdev(bdev); @@ -1035,7 +1095,8 @@ int blkdev_put(struct block_device *bdev) if (disk->fops->release) ret = disk->fops->release(bd_inode, NULL); } else { - mutex_lock(&bdev->bd_contains->bd_mutex); + mutex_lock_nested(&bdev->bd_contains->bd_mutex, + subclass + 1); bdev->bd_contains->bd_part_count--; mutex_unlock(&bdev->bd_contains->bd_mutex); } @@ -1051,9 +1112,8 @@ int blkdev_put(struct block_device *bdev) } bdev->bd_disk = NULL; bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; - if (bdev != bdev->bd_contains) { - blkdev_put(bdev->bd_contains); - } + if (bdev != bdev->bd_contains) + __blkdev_put(bdev->bd_contains, subclass + 1); bdev->bd_contains = NULL; } unlock_kernel(); @@ -1062,8 +1122,20 @@ int blkdev_put(struct block_device *bdev) return ret; } +int blkdev_put(struct block_device *bdev) +{ + return __blkdev_put(bdev, BD_MUTEX_NORMAL); +} + EXPORT_SYMBOL(blkdev_put); +int blkdev_put_partition(struct block_device *bdev) +{ + return __blkdev_put(bdev, BD_MUTEX_PARTITION); +} + +EXPORT_SYMBOL(blkdev_put_partition); + static int blkdev_close(struct inode * inode, struct file * filp) { struct block_device *bdev = I_BDEV(filp->f_mapping->host); diff --git a/include/linux/fs.h b/include/linux/fs.h index e26de68059af..134b32068246 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -435,6 +435,21 @@ struct block_device { unsigned long bd_private; }; +/* + * bdev->bd_mutex nesting subclasses for the lock validator: + * + * 0: normal + * 1: 'whole' + * 2: 'partition' + */ +enum bdev_bd_mutex_lock_class +{ + BD_MUTEX_NORMAL, + BD_MUTEX_WHOLE, + BD_MUTEX_PARTITION +}; + + /* * Radix-tree tags, for tagging dirty and writeback pages within the pagecache * radix trees @@ -1425,6 +1440,7 @@ extern void bd_set_size(struct block_device *, loff_t size); extern void bd_forget(struct inode *inode); extern void bdput(struct block_device *); extern struct block_device *open_by_devnum(dev_t, unsigned); +extern struct block_device *open_partition_by_devnum(dev_t, unsigned); extern const struct file_operations def_blk_fops; extern const struct address_space_operations def_blk_aops; extern const struct file_operations def_chr_fops; @@ -1435,6 +1451,7 @@ extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long); extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); extern int blkdev_get(struct block_device *, mode_t, unsigned); extern int blkdev_put(struct block_device *); +extern int blkdev_put_partition(struct block_device *); extern int 
bd_claim(struct block_device *, void *); extern void bd_release(struct block_device *); #ifdef CONFIG_SYSFS -- cgit v1.2.3 From 36c8b586896f60cb91a4fd526233190b34316baf Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:41 -0700 Subject: [PATCH] sched: cleanup, remove task_t, convert to struct task_struct cleanup: remove task_t and convert all the uses to struct task_struct. I introduced it for the scheduler anno and it was a mistake. Conversion was mostly scripted, the result was reviewed and all secondary whitespace and style impact (if any) was fixed up by hand. Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/kernel/process.c | 2 +- arch/ia64/kernel/mca.c | 10 +- arch/ia64/kernel/smpboot.c | 2 +- arch/mips/kernel/entry.S | 2 +- arch/mips/kernel/mips-mt.c | 6 +- arch/um/kernel/tt/process_kern.c | 2 +- drivers/char/tty_io.c | 2 +- fs/eventpoll.c | 4 +- include/asm-ia64/thread_info.h | 2 +- include/asm-m32r/system.h | 2 +- include/asm-sh/system.h | 2 +- include/linux/sched.h | 55 +++++------ kernel/capability.c | 8 +- kernel/exit.c | 35 +++---- kernel/fork.c | 18 ++-- kernel/hrtimer.c | 2 +- kernel/pid.c | 6 +- kernel/ptrace.c | 6 +- kernel/rtmutex-debug.c | 5 +- kernel/rtmutex-tester.c | 4 +- kernel/rtmutex.c | 11 ++- kernel/sched.c | 192 ++++++++++++++++++++------------------- kernel/timer.c | 2 +- kernel/workqueue.c | 2 +- mm/oom_kill.c | 8 +- 25 files changed, 203 insertions(+), 187 deletions(-) (limited to 'include/linux') diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 01c8c8b23337..41ebf51a107a 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -474,7 +474,7 @@ out: */ unsigned long -thread_saved_pc(task_t *t) +thread_saved_pc(struct task_struct *t) { unsigned long base = (unsigned long)task_stack_page(t); unsigned long fp, sp = task_thread_info(t)->pcb.ksp; diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index eb8e8dc5ac8e..2fbe4536fe18 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -678,7 +678,7 @@ copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat) */ static void -ia64_mca_modify_comm(const task_t *previous_current) +ia64_mca_modify_comm(const struct task_struct *previous_current) { char *p, comm[sizeof(current->comm)]; if (previous_current->pid) @@ -709,7 +709,7 @@ ia64_mca_modify_comm(const task_t *previous_current) * that we can do backtrace on the MCA/INIT handler code itself. 
*/ -static task_t * +static struct task_struct * ia64_mca_modify_original_stack(struct pt_regs *regs, const struct switch_stack *sw, struct ia64_sal_os_state *sos, @@ -719,7 +719,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs, ia64_va va; extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */ const pal_min_state_area_t *ms = sos->pal_min_state; - task_t *previous_current; + struct task_struct *previous_current; struct pt_regs *old_regs; struct switch_stack *old_sw; unsigned size = sizeof(struct pt_regs) + @@ -1023,7 +1023,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, pal_processor_state_info_t *psp = (pal_processor_state_info_t *) &sos->proc_state_param; int recover, cpu = smp_processor_id(); - task_t *previous_current; + struct task_struct *previous_current; struct ia64_mca_notify_die nd = { .sos = sos, .monarch_cpu = &monarch_cpu }; @@ -1352,7 +1352,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, { static atomic_t slaves; static atomic_t monarchs; - task_t *previous_current; + struct task_struct *previous_current; int cpu = smp_processor_id(); struct ia64_mca_notify_die nd = { .sos = sos, .monarch_cpu = &monarch_cpu }; diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index e1960979be29..6203ed4ec8cf 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -124,7 +124,7 @@ extern void __devinit calibrate_delay (void); extern void start_ap (void); extern unsigned long ia64_iobase; -task_t *task_for_booting_cpu; +struct task_struct *task_for_booting_cpu; /* * State for each CPU diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index ecfd637d702a..01e7fa86aa43 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -65,7 +65,7 @@ need_resched: #endif FEXPORT(ret_from_fork) - jal schedule_tail # a0 = task_t *prev + jal schedule_tail # a0 = struct task_struct *prev FEXPORT(syscall_exit) local_irq_disable # make sure need_resched and diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index 02237a685ec7..4dcc39f42951 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c @@ -47,7 +47,7 @@ unsigned long mt_fpemul_threshold = 0; * used in sys_sched_set/getaffinity() in kernel/sched.c, so * cloned here. */ -static inline task_t *find_process_by_pid(pid_t pid) +static inline struct task_struct *find_process_by_pid(pid_t pid) { return pid ? 
find_task_by_pid(pid) : current; } @@ -62,7 +62,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, cpumask_t new_mask; cpumask_t effective_mask; int retval; - task_t *p; + struct task_struct *p; if (len < sizeof(new_mask)) return -EINVAL; @@ -127,7 +127,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, unsigned int real_len; cpumask_t mask; int retval; - task_t *p; + struct task_struct *p; real_len = sizeof(mask); if (len < real_len) diff --git a/arch/um/kernel/tt/process_kern.c b/arch/um/kernel/tt/process_kern.c index a9c1443fc548..8368c2dbe635 100644 --- a/arch/um/kernel/tt/process_kern.c +++ b/arch/um/kernel/tt/process_kern.c @@ -119,7 +119,7 @@ void suspend_new_thread(int fd) panic("read failed in suspend_new_thread, err = %d", -err); } -void schedule_tail(task_t *prev); +void schedule_tail(struct task_struct *prev); static void new_thread_handler(int sig) { diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 6fb77952562d..bfdb90242a90 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -2336,7 +2336,7 @@ static int fionbio(struct file *file, int __user *p) static int tiocsctty(struct tty_struct *tty, int arg) { - task_t *p; + struct task_struct *p; if (current->signal->leader && (current->signal->session == tty->session)) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 9c677bbd0b08..19ffb043abbc 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -120,7 +120,7 @@ struct epoll_filefd { */ struct wake_task_node { struct list_head llink; - task_t *task; + struct task_struct *task; wait_queue_head_t *wq; }; @@ -413,7 +413,7 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq) { int wake_nests = 0; unsigned long flags; - task_t *this_task = current; + struct task_struct *this_task = current; struct list_head *lsthead = &psw->wake_task_list, *lnk; struct wake_task_node *tncur; struct wake_task_node tnode; diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h index 8bc9869e5765..8adcde0934ca 100644 --- a/include/asm-ia64/thread_info.h +++ b/include/asm-ia64/thread_info.h @@ -68,7 +68,7 @@ struct thread_info { #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET) #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR -#define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER)) +#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER)) #define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER) #endif /* !__ASSEMBLY */ diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h index 66c4742f09e7..311cebf44eff 100644 --- a/include/asm-m32r/system.h +++ b/include/asm-m32r/system.h @@ -18,7 +18,7 @@ * switch_to(prev, next) should switch from task `prev' to `next' * `prev' will never be the same as `next'. 
* - * `next' and `prev' should be task_t, but it isn't always defined + * `next' and `prev' should be struct task_struct, but it isn't always defined */ #define switch_to(prev, next, last) do { \ diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h index b752e5cbb830..ce2e60664a86 100644 --- a/include/asm-sh/system.h +++ b/include/asm-sh/system.h @@ -12,7 +12,7 @@ */ #define switch_to(prev, next, last) do { \ - task_t *__last; \ + struct task_struct *__last; \ register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \ register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \ register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \ diff --git a/include/linux/sched.h b/include/linux/sched.h index 8ebddba4448d..c2797f04d931 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -184,11 +184,11 @@ extern unsigned long weighted_cpuload(const int cpu); extern rwlock_t tasklist_lock; extern spinlock_t mmlist_lock; -typedef struct task_struct task_t; +struct task_struct; extern void sched_init(void); extern void sched_init_smp(void); -extern void init_idle(task_t *idle, int cpu); +extern void init_idle(struct task_struct *idle, int cpu); extern cpumask_t nohz_cpu_mask; @@ -383,7 +383,7 @@ struct signal_struct { wait_queue_head_t wait_chldexit; /* for wait4() */ /* current thread group signal load-balancing target: */ - task_t *curr_target; + struct task_struct *curr_target; /* shared signal handling: */ struct sigpending shared_pending; @@ -699,7 +699,7 @@ extern int groups_search(struct group_info *group_info, gid_t grp); ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK]) #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK -extern void prefetch_stack(struct task_struct*); +extern void prefetch_stack(struct task_struct *t); #else static inline void prefetch_stack(struct task_struct *t) { } #endif @@ -1031,9 +1031,9 @@ static inline void put_task_struct(struct task_struct *t) #define used_math() tsk_used_math(current) #ifdef CONFIG_SMP -extern int set_cpus_allowed(task_t *p, cpumask_t new_mask); +extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask); #else -static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask) +static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) { if (!cpu_isset(0, new_mask)) return -EINVAL; @@ -1042,7 +1042,8 @@ static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask) #endif extern unsigned long long sched_clock(void); -extern unsigned long long current_sched_time(const task_t *current_task); +extern unsigned long long +current_sched_time(const struct task_struct *current_task); /* sched_exec is called by processes performing an exec */ #ifdef CONFIG_SMP @@ -1060,27 +1061,27 @@ static inline void idle_task_exit(void) {} extern void sched_idle_next(void); #ifdef CONFIG_RT_MUTEXES -extern int rt_mutex_getprio(task_t *p); -extern void rt_mutex_setprio(task_t *p, int prio); -extern void rt_mutex_adjust_pi(task_t *p); +extern int rt_mutex_getprio(struct task_struct *p); +extern void rt_mutex_setprio(struct task_struct *p, int prio); +extern void rt_mutex_adjust_pi(struct task_struct *p); #else -static inline int rt_mutex_getprio(task_t *p) +static inline int rt_mutex_getprio(struct task_struct *p) { return p->normal_prio; } # define rt_mutex_adjust_pi(p) do { } while (0) #endif -extern void set_user_nice(task_t *p, long nice); -extern int task_prio(const task_t *p); -extern int task_nice(const task_t *p); -extern int can_nice(const task_t *p, const int nice); -extern int 
task_curr(const task_t *p); +extern void set_user_nice(struct task_struct *p, long nice); +extern int task_prio(const struct task_struct *p); +extern int task_nice(const struct task_struct *p); +extern int can_nice(const struct task_struct *p, const int nice); +extern int task_curr(const struct task_struct *p); extern int idle_cpu(int cpu); extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); -extern task_t *idle_task(int cpu); -extern task_t *curr_task(int cpu); -extern void set_curr_task(int cpu, task_t *p); +extern struct task_struct *idle_task(int cpu); +extern struct task_struct *curr_task(int cpu); +extern void set_curr_task(int cpu, struct task_struct *p); void yield(void); @@ -1137,8 +1138,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk, #else static inline void kick_process(struct task_struct *tsk) { } #endif -extern void FASTCALL(sched_fork(task_t * p, int clone_flags)); -extern void FASTCALL(sched_exit(task_t * p)); +extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags)); +extern void FASTCALL(sched_exit(struct task_struct * p)); extern int in_group_p(gid_t); extern int in_egroup_p(gid_t); @@ -1243,17 +1244,17 @@ extern NORET_TYPE void do_group_exit(int); extern void daemonize(const char *, ...); extern int allow_signal(int); extern int disallow_signal(int); -extern task_t *child_reaper; +extern struct task_struct *child_reaper; extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *); extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); -task_t *fork_idle(int); +struct task_struct *fork_idle(int); extern void set_task_comm(struct task_struct *tsk, char *from); extern void get_task_comm(char *to, struct task_struct *tsk); #ifdef CONFIG_SMP -extern void wait_task_inactive(task_t * p); +extern void wait_task_inactive(struct task_struct * p); #else #define wait_task_inactive(p) do { } while (0) #endif @@ -1279,13 +1280,13 @@ extern void wait_task_inactive(task_t * p); /* de_thread depends on thread_group_leader not being a pid based check */ #define thread_group_leader(p) (p == p->group_leader) -static inline task_t *next_thread(const task_t *p) +static inline struct task_struct *next_thread(const struct task_struct *p) { return list_entry(rcu_dereference(p->thread_group.next), - task_t, thread_group); + struct task_struct, thread_group); } -static inline int thread_group_empty(task_t *p) +static inline int thread_group_empty(struct task_struct *p) { return list_empty(&p->thread_group); } diff --git a/kernel/capability.c b/kernel/capability.c index 1a4d8a40d3f9..c7685ad00a97 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -46,7 +46,7 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr) int ret = 0; pid_t pid; __u32 version; - task_t *target; + struct task_struct *target; struct __user_cap_data_struct data; if (get_user(version, &header->version)) @@ -96,7 +96,7 @@ static inline int cap_set_pg(int pgrp, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { - task_t *g, *target; + struct task_struct *g, *target; int ret = -EPERM; int found = 0; @@ -128,7 +128,7 @@ static inline int cap_set_all(kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { - task_t *g, *target; + struct task_struct *g, *target; int ret = -EPERM; int found = 0; @@ -172,7 +172,7 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) 
{ kernel_cap_t inheritable, permitted, effective; __u32 version; - task_t *target; + struct task_struct *target; int ret; pid_t pid; diff --git a/kernel/exit.c b/kernel/exit.c index c595db14cf25..6664c084783d 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -134,8 +134,8 @@ static void delayed_put_task_struct(struct rcu_head *rhp) void release_task(struct task_struct * p) { + struct task_struct *leader; int zap_leader; - task_t *leader; repeat: atomic_dec(&p->user->processes); write_lock_irq(&tasklist_lock); @@ -209,7 +209,7 @@ out: * * "I ask you, have you ever known what it is to be an orphan?" */ -static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task) +static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task) { struct task_struct *p; int ret = 1; @@ -582,7 +582,8 @@ static void exit_mm(struct task_struct * tsk) mmput(mm); } -static inline void choose_new_parent(task_t *p, task_t *reaper) +static inline void +choose_new_parent(struct task_struct *p, struct task_struct *reaper) { /* * Make sure we're not reparenting to ourselves and that @@ -592,7 +593,8 @@ static inline void choose_new_parent(task_t *p, task_t *reaper) p->real_parent = reaper; } -static void reparent_thread(task_t *p, task_t *father, int traced) +static void +reparent_thread(struct task_struct *p, struct task_struct *father, int traced) { /* We don't want people slaying init. */ if (p->exit_signal != -1) @@ -656,8 +658,8 @@ static void reparent_thread(task_t *p, task_t *father, int traced) * group, and if no such member exists, give it to * the global child reaper process (ie "init") */ -static void forget_original_parent(struct task_struct * father, - struct list_head *to_release) +static void +forget_original_parent(struct task_struct *father, struct list_head *to_release) { struct task_struct *p, *reaper = father; struct list_head *_p, *_n; @@ -680,7 +682,7 @@ static void forget_original_parent(struct task_struct * father, */ list_for_each_safe(_p, _n, &father->children) { int ptrace; - p = list_entry(_p,struct task_struct,sibling); + p = list_entry(_p, struct task_struct, sibling); ptrace = p->ptrace; @@ -709,7 +711,7 @@ static void forget_original_parent(struct task_struct * father, list_add(&p->ptrace_list, to_release); } list_for_each_safe(_p, _n, &father->ptrace_children) { - p = list_entry(_p,struct task_struct,ptrace_list); + p = list_entry(_p, struct task_struct, ptrace_list); choose_new_parent(p, reaper); reparent_thread(p, father, 1); } @@ -829,7 +831,7 @@ static void exit_notify(struct task_struct *tsk) list_for_each_safe(_p, _n, &ptrace_dead) { list_del_init(_p); - t = list_entry(_p,struct task_struct,ptrace_list); + t = list_entry(_p, struct task_struct, ptrace_list); release_task(t); } @@ -1010,7 +1012,7 @@ asmlinkage void sys_exit_group(int error_code) do_group_exit((error_code & 0xff) << 8); } -static int eligible_child(pid_t pid, int options, task_t *p) +static int eligible_child(pid_t pid, int options, struct task_struct *p) { if (pid > 0) { if (p->pid != pid) @@ -1051,12 +1053,13 @@ static int eligible_child(pid_t pid, int options, task_t *p) return 1; } -static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid, +static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid, int why, int status, struct siginfo __user *infop, struct rusage __user *rusagep) { int retval = rusagep ? 
getrusage(p, RUSAGE_BOTH, rusagep) : 0; + put_task_struct(p); if (!retval) retval = put_user(SIGCHLD, &infop->si_signo); @@ -1081,7 +1084,7 @@ static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid, * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. */ -static int wait_task_zombie(task_t *p, int noreap, +static int wait_task_zombie(struct task_struct *p, int noreap, struct siginfo __user *infop, int __user *stat_addr, struct rusage __user *ru) { @@ -1243,8 +1246,8 @@ static int wait_task_zombie(task_t *p, int noreap, * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. */ -static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap, - struct siginfo __user *infop, +static int wait_task_stopped(struct task_struct *p, int delayed_group_leader, + int noreap, struct siginfo __user *infop, int __user *stat_addr, struct rusage __user *ru) { int retval, exit_code; @@ -1358,7 +1361,7 @@ bail_ref: * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. */ -static int wait_task_continued(task_t *p, int noreap, +static int wait_task_continued(struct task_struct *p, int noreap, struct siginfo __user *infop, int __user *stat_addr, struct rusage __user *ru) { @@ -1444,7 +1447,7 @@ repeat: int ret; list_for_each(_p,&tsk->children) { - p = list_entry(_p,struct task_struct,sibling); + p = list_entry(_p, struct task_struct, sibling); ret = eligible_child(pid, options, p); if (!ret) diff --git a/kernel/fork.c b/kernel/fork.c index 54953d8a6f17..56e4e07e45f7 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -933,13 +933,13 @@ static inline void rt_mutex_init_task(struct task_struct *p) * parts of the process environment (as per the clone * flags). The actual kick-off is left to the caller. 
*/ -static task_t *copy_process(unsigned long clone_flags, - unsigned long stack_start, - struct pt_regs *regs, - unsigned long stack_size, - int __user *parent_tidptr, - int __user *child_tidptr, - int pid) +static struct task_struct *copy_process(unsigned long clone_flags, + unsigned long stack_start, + struct pt_regs *regs, + unsigned long stack_size, + int __user *parent_tidptr, + int __user *child_tidptr, + int pid) { int retval; struct task_struct *p = NULL; @@ -1294,9 +1294,9 @@ struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs) return regs; } -task_t * __devinit fork_idle(int cpu) +struct task_struct * __devinit fork_idle(int cpu) { - task_t *task; + struct task_struct *task; struct pt_regs regs; task = copy_process(CLONE_VM, 0, idle_regs(®s), 0, NULL, NULL, 0); diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 617304ce67db..d17766d40dab 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -669,7 +669,7 @@ static int hrtimer_wakeup(struct hrtimer *timer) return HRTIMER_NORESTART; } -void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, task_t *task) +void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) { sl->timer.function = hrtimer_wakeup; sl->task = task; diff --git a/kernel/pid.c b/kernel/pid.c index eeb836b65ca4..93e212f20671 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -218,7 +218,7 @@ struct pid * fastcall find_pid(int nr) return NULL; } -int fastcall attach_pid(task_t *task, enum pid_type type, int nr) +int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr) { struct pid_link *link; struct pid *pid; @@ -233,7 +233,7 @@ int fastcall attach_pid(task_t *task, enum pid_type type, int nr) return 0; } -void fastcall detach_pid(task_t *task, enum pid_type type) +void fastcall detach_pid(struct task_struct *task, enum pid_type type) { struct pid_link *link; struct pid *pid; @@ -267,7 +267,7 @@ struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type) /* * Must be called under rcu_read_lock() or with tasklist_lock read-held. */ -task_t *find_task_by_pid_type(int type, int nr) +struct task_struct *find_task_by_pid_type(int type, int nr) { return pid_task(find_pid(nr), type); } diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 335c5b932e14..9a111f70145c 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -28,7 +28,7 @@ * * Must be called with the tasklist lock write-held. */ -void __ptrace_link(task_t *child, task_t *new_parent) +void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) { BUG_ON(!list_empty(&child->ptrace_list)); if (child->parent == new_parent) @@ -46,7 +46,7 @@ void __ptrace_link(task_t *child, task_t *new_parent) * TASK_TRACED, resume it now. * Requires that irqs be disabled. */ -void ptrace_untrace(task_t *child) +void ptrace_untrace(struct task_struct *child) { spin_lock(&child->sighand->siglock); if (child->state == TASK_TRACED) { @@ -65,7 +65,7 @@ void ptrace_untrace(task_t *child) * * Must be called with the tasklist lock write-held. 
*/ -void __ptrace_unlink(task_t *child) +void __ptrace_unlink(struct task_struct *child) { BUG_ON(!child->ptrace); diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c index 353a853bc390..0c1faa950af7 100644 --- a/kernel/rtmutex-debug.c +++ b/kernel/rtmutex-debug.c @@ -96,7 +96,7 @@ void deadlock_trace_off(void) rt_trace_on = 0; } -static void printk_task(task_t *p) +static void printk_task(struct task_struct *p) { if (p) printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio); @@ -231,7 +231,8 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name) lock->name = name; } -void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task) +void +rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task) { } diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c index e82c2f848249..494dac872a13 100644 --- a/kernel/rtmutex-tester.c +++ b/kernel/rtmutex-tester.c @@ -33,7 +33,7 @@ struct test_thread_data { }; static struct test_thread_data thread_data[MAX_RT_TEST_THREADS]; -static task_t *threads[MAX_RT_TEST_THREADS]; +static struct task_struct *threads[MAX_RT_TEST_THREADS]; static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES]; enum test_opcodes { @@ -361,8 +361,8 @@ static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf, static ssize_t sysfs_test_status(struct sys_device *dev, char *buf) { struct test_thread_data *td; + struct task_struct *tsk; char *curr = buf; - task_t *tsk; int i; td = container_of(dev, struct test_thread_data, sysdev); diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 91b699aa658b..d2ef13b485e7 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -157,7 +157,7 @@ int max_lock_depth = 1024; * Decreases task's usage by one - may thus free the task. * Returns 0 or -EDEADLK. 
*/ -static int rt_mutex_adjust_prio_chain(task_t *task, +static int rt_mutex_adjust_prio_chain(struct task_struct *task, int deadlock_detect, struct rt_mutex *orig_lock, struct rt_mutex_waiter *orig_waiter, @@ -282,6 +282,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task, spin_unlock_irqrestore(&task->pi_lock, flags); out_put_task: put_task_struct(task); + return ret; } @@ -403,10 +404,10 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, int detect_deadlock) { + struct task_struct *owner = rt_mutex_owner(lock); struct rt_mutex_waiter *top_waiter = waiter; - task_t *owner = rt_mutex_owner(lock); - int boost = 0, res; unsigned long flags; + int boost = 0, res; spin_lock_irqsave(¤t->pi_lock, flags); __rt_mutex_adjust_prio(current); @@ -527,9 +528,9 @@ static void remove_waiter(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) { int first = (waiter == rt_mutex_top_waiter(lock)); - int boost = 0; - task_t *owner = rt_mutex_owner(lock); + struct task_struct *owner = rt_mutex_owner(lock); unsigned long flags; + int boost = 0; spin_lock_irqsave(¤t->pi_lock, flags); plist_del(&waiter->list_entry, &lock->wait_list); diff --git a/kernel/sched.c b/kernel/sched.c index b0326141f841..021b31219516 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -179,7 +179,7 @@ static unsigned int static_prio_timeslice(int static_prio) return SCALE_PRIO(DEF_TIMESLICE, static_prio); } -static inline unsigned int task_timeslice(task_t *p) +static inline unsigned int task_timeslice(struct task_struct *p) { return static_prio_timeslice(p->static_prio); } @@ -227,7 +227,7 @@ struct runqueue { unsigned long expired_timestamp; unsigned long long timestamp_last_tick; - task_t *curr, *idle; + struct task_struct *curr, *idle; struct mm_struct *prev_mm; prio_array_t *active, *expired, arrays[2]; int best_expired_prio; @@ -240,7 +240,7 @@ struct runqueue { int active_balance; int push_cpu; - task_t *migration_thread; + struct task_struct *migration_thread; struct list_head migration_queue; #endif @@ -291,16 +291,16 @@ static DEFINE_PER_CPU(struct runqueue, runqueues); #endif #ifndef __ARCH_WANT_UNLOCKED_CTXSW -static inline int task_running(runqueue_t *rq, task_t *p) +static inline int task_running(runqueue_t *rq, struct task_struct *p) { return rq->curr == p; } -static inline void prepare_lock_switch(runqueue_t *rq, task_t *next) +static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next) { } -static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) +static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev) { #ifdef CONFIG_DEBUG_SPINLOCK /* this is a valid case when another task releases the spinlock */ @@ -317,7 +317,7 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) } #else /* __ARCH_WANT_UNLOCKED_CTXSW */ -static inline int task_running(runqueue_t *rq, task_t *p) +static inline int task_running(runqueue_t *rq, struct task_struct *p) { #ifdef CONFIG_SMP return p->oncpu; @@ -326,7 +326,7 @@ static inline int task_running(runqueue_t *rq, task_t *p) #endif } -static inline void prepare_lock_switch(runqueue_t *rq, task_t *next) +static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next) { #ifdef CONFIG_SMP /* @@ -343,7 +343,7 @@ static inline void prepare_lock_switch(runqueue_t *rq, task_t *next) #endif } -static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) +static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev) { #ifdef CONFIG_SMP /* @@ 
-364,7 +364,7 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) * __task_rq_lock - lock the runqueue a given task resides on. * Must be called interrupts disabled. */ -static inline runqueue_t *__task_rq_lock(task_t *p) +static inline runqueue_t *__task_rq_lock(struct task_struct *p) __acquires(rq->lock) { struct runqueue *rq; @@ -384,7 +384,7 @@ repeat_lock_task: * interrupts. Note the ordering: we can safely lookup the task_rq without * explicitly disabling preemption. */ -static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags) +static runqueue_t *task_rq_lock(struct task_struct *p, unsigned long *flags) __acquires(rq->lock) { struct runqueue *rq; @@ -541,7 +541,7 @@ static inline runqueue_t *this_rq_lock(void) * long it was from the *first* time it was queued to the time that it * finally hit a cpu. */ -static inline void sched_info_dequeued(task_t *t) +static inline void sched_info_dequeued(struct task_struct *t) { t->sched_info.last_queued = 0; } @@ -551,7 +551,7 @@ static inline void sched_info_dequeued(task_t *t) * long it was waiting to run. We also note when it began so that we * can keep stats on how long its timeslice is. */ -static void sched_info_arrive(task_t *t) +static void sched_info_arrive(struct task_struct *t) { unsigned long now = jiffies, diff = 0; struct runqueue *rq = task_rq(t); @@ -585,7 +585,7 @@ static void sched_info_arrive(task_t *t) * the timestamp if it is already not set. It's assumed that * sched_info_dequeued() will clear that stamp when appropriate. */ -static inline void sched_info_queued(task_t *t) +static inline void sched_info_queued(struct task_struct *t) { if (!t->sched_info.last_queued) t->sched_info.last_queued = jiffies; @@ -595,7 +595,7 @@ static inline void sched_info_queued(task_t *t) * Called when a process ceases being the active-running process, either * voluntarily or involuntarily. Now we can calculate how long we ran. */ -static inline void sched_info_depart(task_t *t) +static inline void sched_info_depart(struct task_struct *t) { struct runqueue *rq = task_rq(t); unsigned long diff = jiffies - t->sched_info.last_arrival; @@ -611,7 +611,8 @@ static inline void sched_info_depart(task_t *t) * their time slice. (This may also be called when switching to or from * the idle task.) We are only called when prev != next. */ -static inline void sched_info_switch(task_t *prev, task_t *next) +static inline void +sched_info_switch(struct task_struct *prev, struct task_struct *next) { struct runqueue *rq = task_rq(prev); @@ -683,7 +684,7 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array) * Both properties are important to certain workloads. 
*/ -static inline int __normal_prio(task_t *p) +static inline int __normal_prio(struct task_struct *p) { int bonus, prio; @@ -719,7 +720,7 @@ static inline int __normal_prio(task_t *p) #define RTPRIO_TO_LOAD_WEIGHT(rp) \ (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp)) -static void set_load_weight(task_t *p) +static void set_load_weight(struct task_struct *p) { if (has_rt_policy(p)) { #ifdef CONFIG_SMP @@ -737,23 +738,25 @@ static void set_load_weight(task_t *p) p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio); } -static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p) +static inline void +inc_raw_weighted_load(runqueue_t *rq, const struct task_struct *p) { rq->raw_weighted_load += p->load_weight; } -static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p) +static inline void +dec_raw_weighted_load(runqueue_t *rq, const struct task_struct *p) { rq->raw_weighted_load -= p->load_weight; } -static inline void inc_nr_running(task_t *p, runqueue_t *rq) +static inline void inc_nr_running(struct task_struct *p, runqueue_t *rq) { rq->nr_running++; inc_raw_weighted_load(rq, p); } -static inline void dec_nr_running(task_t *p, runqueue_t *rq) +static inline void dec_nr_running(struct task_struct *p, runqueue_t *rq) { rq->nr_running--; dec_raw_weighted_load(rq, p); @@ -766,7 +769,7 @@ static inline void dec_nr_running(task_t *p, runqueue_t *rq) * setprio syscalls, and whenever the interactivity * estimator recalculates. */ -static inline int normal_prio(task_t *p) +static inline int normal_prio(struct task_struct *p) { int prio; @@ -784,7 +787,7 @@ static inline int normal_prio(task_t *p) * interactivity modifiers. Will be RT if the task got * RT-boosted. If not then it returns p->normal_prio. */ -static int effective_prio(task_t *p) +static int effective_prio(struct task_struct *p) { p->normal_prio = normal_prio(p); /* @@ -800,7 +803,7 @@ static int effective_prio(task_t *p) /* * __activate_task - move a task to the runqueue. */ -static void __activate_task(task_t *p, runqueue_t *rq) +static void __activate_task(struct task_struct *p, runqueue_t *rq) { prio_array_t *target = rq->active; @@ -813,7 +816,7 @@ static void __activate_task(task_t *p, runqueue_t *rq) /* * __activate_idle_task - move idle task to the _front_ of runqueue. */ -static inline void __activate_idle_task(task_t *p, runqueue_t *rq) +static inline void __activate_idle_task(struct task_struct *p, runqueue_t *rq) { enqueue_task_head(p, rq->active); inc_nr_running(p, rq); @@ -823,7 +826,7 @@ static inline void __activate_idle_task(task_t *p, runqueue_t *rq) * Recalculate p->normal_prio and p->prio after having slept, * updating the sleep-average too: */ -static int recalc_task_prio(task_t *p, unsigned long long now) +static int recalc_task_prio(struct task_struct *p, unsigned long long now) { /* Caller must always ensure 'now >= p->timestamp' */ unsigned long sleep_time = now - p->timestamp; @@ -895,7 +898,7 @@ static int recalc_task_prio(task_t *p, unsigned long long now) * Update all the scheduling statistics stuff. (sleep average * calculation, priority modifiers, etc.) 
*/ -static void activate_task(task_t *p, runqueue_t *rq, int local) +static void activate_task(struct task_struct *p, runqueue_t *rq, int local) { unsigned long long now; @@ -962,7 +965,7 @@ static void deactivate_task(struct task_struct *p, runqueue_t *rq) #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) #endif -static void resched_task(task_t *p) +static void resched_task(struct task_struct *p) { int cpu; @@ -983,7 +986,7 @@ static void resched_task(task_t *p) smp_send_reschedule(cpu); } #else -static inline void resched_task(task_t *p) +static inline void resched_task(struct task_struct *p) { assert_spin_locked(&task_rq(p)->lock); set_tsk_need_resched(p); @@ -994,7 +997,7 @@ static inline void resched_task(task_t *p) * task_curr - is this task currently executing on a CPU? * @p: the task in question. */ -inline int task_curr(const task_t *p) +inline int task_curr(const struct task_struct *p) { return cpu_curr(task_cpu(p)) == p; } @@ -1009,7 +1012,7 @@ unsigned long weighted_cpuload(const int cpu) typedef struct { struct list_head list; - task_t *task; + struct task_struct *task; int dest_cpu; struct completion done; @@ -1019,7 +1022,8 @@ typedef struct { * The task's runqueue lock must be held. * Returns true if you have to wait for migration thread. */ -static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req) +static int +migrate_task(struct task_struct *p, int dest_cpu, migration_req_t *req) { runqueue_t *rq = task_rq(p); @@ -1049,7 +1053,7 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req) * smp_call_function() if an IPI is sent by the same process we are * waiting to become inactive. */ -void wait_task_inactive(task_t *p) +void wait_task_inactive(struct task_struct *p) { unsigned long flags; runqueue_t *rq; @@ -1083,7 +1087,7 @@ repeat: * to another CPU then no harm is done and the purpose has been * achieved as well. */ -void kick_process(task_t *p) +void kick_process(struct task_struct *p) { int cpu; @@ -1286,7 +1290,7 @@ nextlevel: * Returns the CPU we should wake onto. */ #if defined(ARCH_HAS_SCHED_WAKE_IDLE) -static int wake_idle(int cpu, task_t *p) +static int wake_idle(int cpu, struct task_struct *p) { cpumask_t tmp; struct sched_domain *sd; @@ -1309,7 +1313,7 @@ static int wake_idle(int cpu, task_t *p) return cpu; } #else -static inline int wake_idle(int cpu, task_t *p) +static inline int wake_idle(int cpu, struct task_struct *p) { return cpu; } @@ -1329,7 +1333,7 @@ static inline int wake_idle(int cpu, task_t *p) * * returns failure only if the task is already active. */ -static int try_to_wake_up(task_t *p, unsigned int state, int sync) +static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) { int cpu, this_cpu, success = 0; unsigned long flags; @@ -1487,14 +1491,14 @@ out: return success; } -int fastcall wake_up_process(task_t *p) +int fastcall wake_up_process(struct task_struct *p) { return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0); } EXPORT_SYMBOL(wake_up_process); -int fastcall wake_up_state(task_t *p, unsigned int state) +int fastcall wake_up_state(struct task_struct *p, unsigned int state) { return try_to_wake_up(p, state, 0); } @@ -1503,7 +1507,7 @@ int fastcall wake_up_state(task_t *p, unsigned int state) * Perform scheduler related setup for a newly forked process p. * p is forked by current. 
*/ -void fastcall sched_fork(task_t *p, int clone_flags) +void fastcall sched_fork(struct task_struct *p, int clone_flags) { int cpu = get_cpu(); @@ -1571,7 +1575,7 @@ void fastcall sched_fork(task_t *p, int clone_flags) * that must be done for every newly created context, then puts the task * on the runqueue and wakes it. */ -void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags) +void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) { unsigned long flags; int this_cpu, cpu; @@ -1655,7 +1659,7 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags) * artificially, because any timeslice recovered here * was given away by the parent in the first place.) */ -void fastcall sched_exit(task_t *p) +void fastcall sched_exit(struct task_struct *p) { unsigned long flags; runqueue_t *rq; @@ -1689,7 +1693,7 @@ void fastcall sched_exit(task_t *p) * prepare_task_switch sets up locking and calls architecture specific * hooks. */ -static inline void prepare_task_switch(runqueue_t *rq, task_t *next) +static inline void prepare_task_switch(runqueue_t *rq, struct task_struct *next) { prepare_lock_switch(rq, next); prepare_arch_switch(next); @@ -1710,7 +1714,7 @@ static inline void prepare_task_switch(runqueue_t *rq, task_t *next) * with the lock held can cause deadlocks; see schedule() for * details.) */ -static inline void finish_task_switch(runqueue_t *rq, task_t *prev) +static inline void finish_task_switch(runqueue_t *rq, struct task_struct *prev) __releases(rq->lock) { struct mm_struct *mm = rq->prev_mm; @@ -1748,7 +1752,7 @@ static inline void finish_task_switch(runqueue_t *rq, task_t *prev) * schedule_tail - first thing a freshly forked thread must call. * @prev: the thread we just switched away from. */ -asmlinkage void schedule_tail(task_t *prev) +asmlinkage void schedule_tail(struct task_struct *prev) __releases(rq->lock) { runqueue_t *rq = this_rq(); @@ -1765,8 +1769,9 @@ asmlinkage void schedule_tail(task_t *prev) * context_switch - switch to the new MM and the new * thread's register state. */ -static inline -task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next) +static inline struct task_struct * +context_switch(runqueue_t *rq, struct task_struct *prev, + struct task_struct *next) { struct mm_struct *mm = next->mm; struct mm_struct *oldmm = prev->active_mm; @@ -1937,7 +1942,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest) * allow dest_cpu, which will force the cpu onto dest_cpu. Then * the cpu_allowed mask is restored. */ -static void sched_migrate_task(task_t *p, int dest_cpu) +static void sched_migrate_task(struct task_struct *p, int dest_cpu) { migration_req_t req; runqueue_t *rq; @@ -1952,11 +1957,13 @@ static void sched_migrate_task(task_t *p, int dest_cpu) if (migrate_task(p, dest_cpu, &req)) { /* Need to wait for migration thread (might exit: take ref). */ struct task_struct *mt = rq->migration_thread; + get_task_struct(mt); task_rq_unlock(rq, &flags); wake_up_process(mt); put_task_struct(mt); wait_for_completion(&req.done); + return; } out: @@ -1980,9 +1987,9 @@ void sched_exec(void) * pull_task - move a task from a remote runqueue to the local runqueue. * Both runqueues must be locked. 
*/ -static -void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, - runqueue_t *this_rq, prio_array_t *this_array, int this_cpu) +static void pull_task(runqueue_t *src_rq, prio_array_t *src_array, + struct task_struct *p, runqueue_t *this_rq, + prio_array_t *this_array, int this_cpu) { dequeue_task(p, src_array); dec_nr_running(p, src_rq); @@ -2003,7 +2010,7 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? */ static -int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, +int can_migrate_task(struct task_struct *p, runqueue_t *rq, int this_cpu, struct sched_domain *sd, enum idle_type idle, int *all_pinned) { @@ -2052,8 +2059,8 @@ static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest, best_prio_seen, skip_for_load; prio_array_t *array, *dst_array; struct list_head *head, *curr; + struct task_struct *tmp; long rem_load_move; - task_t *tmp; if (max_nr_move == 0 || max_load_move == 0) goto out; @@ -2105,7 +2112,7 @@ skip_bitmap: head = array->queue + idx; curr = head->prev; skip_queue: - tmp = list_entry(curr, task_t, run_list); + tmp = list_entry(curr, struct task_struct, run_list); curr = curr->prev; @@ -2819,7 +2826,7 @@ EXPORT_PER_CPU_SYMBOL(kstat); * Bank in p->sched_time the ns elapsed since the last tick or switch. */ static inline void -update_cpu_clock(task_t *p, runqueue_t *rq, unsigned long long now) +update_cpu_clock(struct task_struct *p, runqueue_t *rq, unsigned long long now) { p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick); } @@ -2828,7 +2835,7 @@ update_cpu_clock(task_t *p, runqueue_t *rq, unsigned long long now) * Return current->sched_time plus any more ns on the sched_clock * that have not yet been banked. */ -unsigned long long current_sched_time(const task_t *p) +unsigned long long current_sched_time(const struct task_struct *p) { unsigned long long ns; unsigned long flags; @@ -2945,9 +2952,9 @@ void account_steal_time(struct task_struct *p, cputime_t steal) void scheduler_tick(void) { unsigned long long now = sched_clock(); + struct task_struct *p = current; int cpu = smp_processor_id(); runqueue_t *rq = this_rq(); - task_t *p = current; update_cpu_clock(p, rq, now); @@ -3079,7 +3086,8 @@ static void wake_sleeping_dependent(int this_cpu) * utilize, if another task runs on a sibling. This models the * slowdown effect of other tasks running on siblings: */ -static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd) +static inline unsigned long +smt_slice(struct task_struct *p, struct sched_domain *sd) { return p->time_slice * (100 - sd->per_cpu_gain) / 100; } @@ -3090,7 +3098,8 @@ static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd) * acquire their lock. As we only trylock the normal locking order does not * need to be obeyed. 
*/ -static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p) +static int +dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p) { struct sched_domain *tmp, *sd = NULL; int ret = 0, i; @@ -3110,8 +3119,8 @@ static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p) return 0; for_each_cpu_mask(i, sd->span) { + struct task_struct *smt_curr; runqueue_t *smt_rq; - task_t *smt_curr; if (i == this_cpu) continue; @@ -3157,7 +3166,7 @@ static inline void wake_sleeping_dependent(int this_cpu) { } static inline int -dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p) +dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p) { return 0; } @@ -3211,11 +3220,11 @@ static inline int interactive_sleep(enum sleep_type sleep_type) */ asmlinkage void __sched schedule(void) { + struct task_struct *prev, *next; struct list_head *queue; unsigned long long now; unsigned long run_time; int cpu, idx, new_prio; - task_t *prev, *next; prio_array_t *array; long *switch_count; runqueue_t *rq; @@ -3308,7 +3317,7 @@ need_resched_nonpreemptible: idx = sched_find_first_bit(array->bitmap); queue = array->queue + idx; - next = list_entry(queue->next, task_t, run_list); + next = list_entry(queue->next, struct task_struct, run_list); if (!rt_task(next) && interactive_sleep(next->sleep_type)) { unsigned long long delta = now - next->timestamp; @@ -3776,7 +3785,7 @@ EXPORT_SYMBOL(sleep_on_timeout); * * Used by the rt_mutex code to implement priority inheritance logic. */ -void rt_mutex_setprio(task_t *p, int prio) +void rt_mutex_setprio(struct task_struct *p, int prio) { unsigned long flags; prio_array_t *array; @@ -3817,7 +3826,7 @@ void rt_mutex_setprio(task_t *p, int prio) #endif -void set_user_nice(task_t *p, long nice) +void set_user_nice(struct task_struct *p, long nice) { int old_prio, delta; unsigned long flags; @@ -3873,7 +3882,7 @@ EXPORT_SYMBOL(set_user_nice); * @p: task * @nice: nice value */ -int can_nice(const task_t *p, const int nice) +int can_nice(const struct task_struct *p, const int nice) { /* convert nice value [19,-20] to rlimit style value [1,40] */ int nice_rlim = 20 - nice; @@ -3932,7 +3941,7 @@ asmlinkage long sys_nice(int increment) * RT tasks are offset by -200. Normal tasks are centered * around 0, value goes from -16 to +15. */ -int task_prio(const task_t *p) +int task_prio(const struct task_struct *p) { return p->prio - MAX_RT_PRIO; } @@ -3941,7 +3950,7 @@ int task_prio(const task_t *p) * task_nice - return the nice value of a given task. * @p: the task in question. */ -int task_nice(const task_t *p) +int task_nice(const struct task_struct *p) { return TASK_NICE(p); } @@ -3960,7 +3969,7 @@ int idle_cpu(int cpu) * idle_task - return the idle task for a given cpu. * @cpu: the processor in question. */ -task_t *idle_task(int cpu) +struct task_struct *idle_task(int cpu) { return cpu_rq(cpu)->idle; } @@ -3969,7 +3978,7 @@ task_t *idle_task(int cpu) * find_process_by_pid - find a process with a matching PID value. * @pid: the pid in question. */ -static inline task_t *find_process_by_pid(pid_t pid) +static inline struct task_struct *find_process_by_pid(pid_t pid) { return pid ? 
find_task_by_pid(pid) : current; } @@ -4103,9 +4112,9 @@ EXPORT_SYMBOL_GPL(sched_setscheduler); static int do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) { - int retval; struct sched_param lparam; struct task_struct *p; + int retval; if (!param || pid < 0) return -EINVAL; @@ -4121,6 +4130,7 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) read_unlock_irq(&tasklist_lock); retval = sched_setscheduler(p, policy, &lparam); put_task_struct(p); + return retval; } @@ -4156,8 +4166,8 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param) */ asmlinkage long sys_sched_getscheduler(pid_t pid) { + struct task_struct *p; int retval = -EINVAL; - task_t *p; if (pid < 0) goto out_nounlock; @@ -4184,8 +4194,8 @@ out_nounlock: asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param) { struct sched_param lp; + struct task_struct *p; int retval = -EINVAL; - task_t *p; if (!param || pid < 0) goto out_nounlock; @@ -4218,9 +4228,9 @@ out_unlock: long sched_setaffinity(pid_t pid, cpumask_t new_mask) { - task_t *p; - int retval; cpumask_t cpus_allowed; + struct task_struct *p; + int retval; lock_cpu_hotplug(); read_lock(&tasklist_lock); @@ -4306,8 +4316,8 @@ cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; long sched_getaffinity(pid_t pid, cpumask_t *mask) { + struct task_struct *p; int retval; - task_t *p; lock_cpu_hotplug(); read_lock(&tasklist_lock); @@ -4592,9 +4602,9 @@ asmlinkage long sys_sched_get_priority_min(int policy) asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval) { + struct task_struct *p; int retval = -EINVAL; struct timespec t; - task_t *p; if (pid < 0) goto out_nounlock; @@ -4641,12 +4651,13 @@ static inline struct task_struct *younger_sibling(struct task_struct *p) return list_entry(p->sibling.next,struct task_struct,sibling); } -static void show_task(task_t *p) +static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" }; + +static void show_task(struct task_struct *p) { - task_t *relative; - unsigned state; + struct task_struct *relative; unsigned long free = 0; - static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" }; + unsigned state; printk("%-13.13s ", p->comm); state = p->state ? __ffs(p->state) + 1 : 0; @@ -4697,7 +4708,7 @@ static void show_task(task_t *p) void show_state(void) { - task_t *g, *p; + struct task_struct *g, *p; #if (BITS_PER_LONG == 32) printk("\n" @@ -4730,7 +4741,7 @@ void show_state(void) * NOTE: this function does not set the idle thread's NEED_RESCHED * flag, to make booting more robust. */ -void __devinit init_idle(task_t *idle, int cpu) +void __devinit init_idle(struct task_struct *idle, int cpu) { runqueue_t *rq = cpu_rq(cpu); unsigned long flags; @@ -4793,7 +4804,7 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE; * task must not exit() & deallocate itself prematurely. The * call is not atomic; no spinlocks may be held. 
*/ -int set_cpus_allowed(task_t *p, cpumask_t new_mask) +int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) { unsigned long flags; migration_req_t req; @@ -5061,7 +5072,7 @@ void idle_task_exit(void) mmdrop(mm); } -static void migrate_dead(unsigned int dead_cpu, task_t *p) +static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) { struct runqueue *rq = cpu_rq(dead_cpu); @@ -5096,9 +5107,8 @@ static void migrate_dead_tasks(unsigned int dead_cpu) struct list_head *list = &rq->arrays[arr].queue[i]; while (!list_empty(list)) - migrate_dead(dead_cpu, - list_entry(list->next, task_t, - run_list)); + migrate_dead(dead_cpu, list_entry(list->next, + struct task_struct, run_list)); } } } @@ -6801,7 +6811,7 @@ void normalize_rt_tasks(void) * * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! */ -task_t *curr_task(int cpu) +struct task_struct *curr_task(int cpu) { return cpu_curr(cpu); } @@ -6821,7 +6831,7 @@ task_t *curr_task(int cpu) * * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! */ -void set_curr_task(int cpu, task_t *p) +void set_curr_task(int cpu, struct task_struct *p) { cpu_curr(cpu) = p; } diff --git a/kernel/timer.c b/kernel/timer.c index b761898d04c8..396a3c024c2c 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1368,7 +1368,7 @@ asmlinkage long sys_getegid(void) static void process_timeout(unsigned long __data) { - wake_up_process((task_t *)__data); + wake_up_process((struct task_struct *)__data); } /** diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 59f0b42bd89e..90d2c6001659 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -51,7 +51,7 @@ struct cpu_workqueue_struct { wait_queue_head_t work_done; struct workqueue_struct *wq; - task_t *thread; + struct task_struct *thread; int run_depth; /* Detect run_workqueue() recursion depth */ } ____cacheline_aligned; diff --git a/mm/oom_kill.c b/mm/oom_kill.c index d46ed0f1dc06..b9af136e5cfa 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -225,7 +225,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints) * CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that * we select a process with CAP_SYS_RAW_IO set). */ -static void __oom_kill_task(task_t *p, const char *message) +static void __oom_kill_task(struct task_struct *p, const char *message) { if (p->pid == 1) { WARN_ON(1); @@ -255,10 +255,10 @@ static void __oom_kill_task(task_t *p, const char *message) force_sig(SIGKILL, p); } -static int oom_kill_task(task_t *p, const char *message) +static int oom_kill_task(struct task_struct *p, const char *message) { struct mm_struct *mm; - task_t * g, * q; + struct task_struct *g, *q; mm = p->mm; @@ -316,7 +316,7 @@ static int oom_kill_process(struct task_struct *p, unsigned long points, */ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) { - task_t *p; + struct task_struct *p; unsigned long points = 0; if (printk_ratelimit()) { -- cgit v1.2.3 From 70b97a7f0b19cf1f2619deb5cc41e8b78c591aa7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:42 -0700 Subject: [PATCH] sched: cleanup, convert sched.c-internal typedefs to struct convert: - runqueue_t to 'struct rq' - prio_array_t to 'struct prio_array' - migration_req_t to 'struct migration_req' I was the one who added these but they are both against the kernel coding style and also were used inconsistently at places. So just get rid of them at once, now that we are flushing the scheduler patch-queue anyway. 
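To make the shape of the cleanup concrete, here is a minimal sketch with invented names (nothing in it is quoted from kernel/sched.c). Dropping the typedef in favour of the bare struct tag lets other code forward-declare the type when it only passes pointers around, and every use site then shows that an aggregate is involved:

/*
 * Before:
 *	typedef struct example_rq { unsigned long nr_running; } example_rq_t;
 *	static void touch(example_rq_t *rq);
 *
 * After: the struct tag is spelled out everywhere.
 */
struct example_rq;			/* forward declaration, layout not needed */

struct example_rq {
	unsigned long nr_running;
};

static void touch(struct example_rq *rq)
{
	rq->nr_running++;
}
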
Conversion was mostly scripted, the result was reviewed and all secondary whitespace and style impact (if any) was fixed up by hand. Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 5 +- kernel/sched.c | 250 +++++++++++++++++++++++++------------------------- 2 files changed, 128 insertions(+), 127 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index c2797f04d931..1c876e27ff93 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -534,7 +534,6 @@ extern struct user_struct *find_user(uid_t); extern struct user_struct root_user; #define INIT_USER (&root_user) -typedef struct prio_array prio_array_t; struct backing_dev_info; struct reclaim_state; @@ -715,6 +714,8 @@ enum sleep_type { SLEEP_INTERRUPTED, }; +struct prio_array; + struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ struct thread_info *thread_info; @@ -732,7 +733,7 @@ struct task_struct { int load_weight; /* for niceness load balancing purposes */ int prio, static_prio, normal_prio; struct list_head run_list; - prio_array_t *array; + struct prio_array *array; unsigned short ioprio; unsigned int btrace_seq; diff --git a/kernel/sched.c b/kernel/sched.c index 021b31219516..4ee400f9d56b 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -188,8 +188,6 @@ static inline unsigned int task_timeslice(struct task_struct *p) * These are the runqueue data structures: */ -typedef struct runqueue runqueue_t; - struct prio_array { unsigned int nr_active; DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */ @@ -203,7 +201,7 @@ struct prio_array { * (such as the load balancing or the thread migration code), lock * acquire operations must be ordered by ascending &runqueue. */ -struct runqueue { +struct rq { spinlock_t lock; /* @@ -229,7 +227,7 @@ struct runqueue { unsigned long long timestamp_last_tick; struct task_struct *curr, *idle; struct mm_struct *prev_mm; - prio_array_t *active, *expired, arrays[2]; + struct prio_array *active, *expired, arrays[2]; int best_expired_prio; atomic_t nr_iowait; @@ -266,7 +264,7 @@ struct runqueue { struct lock_class_key rq_lock_key; }; -static DEFINE_PER_CPU(struct runqueue, runqueues); +static DEFINE_PER_CPU(struct rq, runqueues); /* * The domain tree (rq->sd) is protected by RCU's quiescent state transition. 
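The per-CPU runqueue idiom converted in the hunk above recurs throughout the rest of this diff via cpu_rq(), this_rq() and task_rq(). The sketch below reconstructs how such helpers typically hang off the per-CPU variable; the struct body is trimmed and the macro definitions are written from memory, not quoted from kernel/sched.c:

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/* Trimmed stand-in for struct rq; the real structure carries far more. */
struct rq {
	spinlock_t lock;
	unsigned long nr_running;
	struct task_struct *curr, *idle;
};

static DEFINE_PER_CPU(struct rq, runqueues);

/* Runqueue of a given CPU, of the local CPU, and of a given task. */
#define cpu_rq(cpu)	(&per_cpu(runqueues, (cpu)))
#define this_rq()	(&__get_cpu_var(runqueues))
#define task_rq(p)	cpu_rq(task_cpu(p))
#define cpu_curr(cpu)	(cpu_rq(cpu)->curr)
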
@@ -291,16 +289,16 @@ static DEFINE_PER_CPU(struct runqueue, runqueues); #endif #ifndef __ARCH_WANT_UNLOCKED_CTXSW -static inline int task_running(runqueue_t *rq, struct task_struct *p) +static inline int task_running(struct rq *rq, struct task_struct *p) { return rq->curr == p; } -static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next) +static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) { } -static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev) +static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) { #ifdef CONFIG_DEBUG_SPINLOCK /* this is a valid case when another task releases the spinlock */ @@ -317,7 +315,7 @@ static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev) } #else /* __ARCH_WANT_UNLOCKED_CTXSW */ -static inline int task_running(runqueue_t *rq, struct task_struct *p) +static inline int task_running(struct rq *rq, struct task_struct *p) { #ifdef CONFIG_SMP return p->oncpu; @@ -326,7 +324,7 @@ static inline int task_running(runqueue_t *rq, struct task_struct *p) #endif } -static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next) +static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) { #ifdef CONFIG_SMP /* @@ -343,7 +341,7 @@ static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next) #endif } -static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev) +static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) { #ifdef CONFIG_SMP /* @@ -364,10 +362,10 @@ static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev) * __task_rq_lock - lock the runqueue a given task resides on. * Must be called interrupts disabled. */ -static inline runqueue_t *__task_rq_lock(struct task_struct *p) +static inline struct rq *__task_rq_lock(struct task_struct *p) __acquires(rq->lock) { - struct runqueue *rq; + struct rq *rq; repeat_lock_task: rq = task_rq(p); @@ -384,10 +382,10 @@ repeat_lock_task: * interrupts. Note the ordering: we can safely lookup the task_rq without * explicitly disabling preemption. */ -static runqueue_t *task_rq_lock(struct task_struct *p, unsigned long *flags) +static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) __acquires(rq->lock) { - struct runqueue *rq; + struct rq *rq; repeat_lock_task: local_irq_save(*flags); @@ -400,13 +398,13 @@ repeat_lock_task: return rq; } -static inline void __task_rq_unlock(runqueue_t *rq) +static inline void __task_rq_unlock(struct rq *rq) __releases(rq->lock) { spin_unlock(&rq->lock); } -static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags) +static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) __releases(rq->lock) { spin_unlock_irqrestore(&rq->lock, *flags); @@ -426,7 +424,7 @@ static int show_schedstat(struct seq_file *seq, void *v) seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION); seq_printf(seq, "timestamp %lu\n", jiffies); for_each_online_cpu(cpu) { - runqueue_t *rq = cpu_rq(cpu); + struct rq *rq = cpu_rq(cpu); #ifdef CONFIG_SMP struct sched_domain *sd; int dcnt = 0; @@ -513,10 +511,10 @@ struct file_operations proc_schedstat_operations = { /* * rq_lock - lock a given runqueue and disable interrupts. 
*/ -static inline runqueue_t *this_rq_lock(void) +static inline struct rq *this_rq_lock(void) __acquires(rq->lock) { - runqueue_t *rq; + struct rq *rq; local_irq_disable(); rq = this_rq(); @@ -554,7 +552,7 @@ static inline void sched_info_dequeued(struct task_struct *t) static void sched_info_arrive(struct task_struct *t) { unsigned long now = jiffies, diff = 0; - struct runqueue *rq = task_rq(t); + struct rq *rq = task_rq(t); if (t->sched_info.last_queued) diff = now - t->sched_info.last_queued; @@ -597,7 +595,7 @@ static inline void sched_info_queued(struct task_struct *t) */ static inline void sched_info_depart(struct task_struct *t) { - struct runqueue *rq = task_rq(t); + struct rq *rq = task_rq(t); unsigned long diff = jiffies - t->sched_info.last_arrival; t->sched_info.cpu_time += diff; @@ -614,7 +612,7 @@ static inline void sched_info_depart(struct task_struct *t) static inline void sched_info_switch(struct task_struct *prev, struct task_struct *next) { - struct runqueue *rq = task_rq(prev); + struct rq *rq = task_rq(prev); /* * prev now departs the cpu. It's not interesting to record @@ -635,7 +633,7 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next) /* * Adding/removing a task to/from a priority array: */ -static void dequeue_task(struct task_struct *p, prio_array_t *array) +static void dequeue_task(struct task_struct *p, struct prio_array *array) { array->nr_active--; list_del(&p->run_list); @@ -643,7 +641,7 @@ static void dequeue_task(struct task_struct *p, prio_array_t *array) __clear_bit(p->prio, array->bitmap); } -static void enqueue_task(struct task_struct *p, prio_array_t *array) +static void enqueue_task(struct task_struct *p, struct prio_array *array) { sched_info_queued(p); list_add_tail(&p->run_list, array->queue + p->prio); @@ -656,12 +654,13 @@ static void enqueue_task(struct task_struct *p, prio_array_t *array) * Put task to the end of the run list without the overhead of dequeue * followed by enqueue. */ -static void requeue_task(struct task_struct *p, prio_array_t *array) +static void requeue_task(struct task_struct *p, struct prio_array *array) { list_move_tail(&p->run_list, array->queue + p->prio); } -static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array) +static inline void +enqueue_task_head(struct task_struct *p, struct prio_array *array) { list_add(&p->run_list, array->queue + p->prio); __set_bit(p->prio, array->bitmap); @@ -739,24 +738,24 @@ static void set_load_weight(struct task_struct *p) } static inline void -inc_raw_weighted_load(runqueue_t *rq, const struct task_struct *p) +inc_raw_weighted_load(struct rq *rq, const struct task_struct *p) { rq->raw_weighted_load += p->load_weight; } static inline void -dec_raw_weighted_load(runqueue_t *rq, const struct task_struct *p) +dec_raw_weighted_load(struct rq *rq, const struct task_struct *p) { rq->raw_weighted_load -= p->load_weight; } -static inline void inc_nr_running(struct task_struct *p, runqueue_t *rq) +static inline void inc_nr_running(struct task_struct *p, struct rq *rq) { rq->nr_running++; inc_raw_weighted_load(rq, p); } -static inline void dec_nr_running(struct task_struct *p, runqueue_t *rq) +static inline void dec_nr_running(struct task_struct *p, struct rq *rq) { rq->nr_running--; dec_raw_weighted_load(rq, p); @@ -803,9 +802,9 @@ static int effective_prio(struct task_struct *p) /* * __activate_task - move a task to the runqueue. 
*/ -static void __activate_task(struct task_struct *p, runqueue_t *rq) +static void __activate_task(struct task_struct *p, struct rq *rq) { - prio_array_t *target = rq->active; + struct prio_array *target = rq->active; if (batch_task(p)) target = rq->expired; @@ -816,7 +815,7 @@ static void __activate_task(struct task_struct *p, runqueue_t *rq) /* * __activate_idle_task - move idle task to the _front_ of runqueue. */ -static inline void __activate_idle_task(struct task_struct *p, runqueue_t *rq) +static inline void __activate_idle_task(struct task_struct *p, struct rq *rq) { enqueue_task_head(p, rq->active); inc_nr_running(p, rq); @@ -898,7 +897,7 @@ static int recalc_task_prio(struct task_struct *p, unsigned long long now) * Update all the scheduling statistics stuff. (sleep average * calculation, priority modifiers, etc.) */ -static void activate_task(struct task_struct *p, runqueue_t *rq, int local) +static void activate_task(struct task_struct *p, struct rq *rq, int local) { unsigned long long now; @@ -906,7 +905,7 @@ static void activate_task(struct task_struct *p, runqueue_t *rq, int local) #ifdef CONFIG_SMP if (!local) { /* Compensate for drifting sched_clock */ - runqueue_t *this_rq = this_rq(); + struct rq *this_rq = this_rq(); now = (now - this_rq->timestamp_last_tick) + rq->timestamp_last_tick; } @@ -945,7 +944,7 @@ static void activate_task(struct task_struct *p, runqueue_t *rq, int local) /* * deactivate_task - remove a task from the runqueue. */ -static void deactivate_task(struct task_struct *p, runqueue_t *rq) +static void deactivate_task(struct task_struct *p, struct rq *rq) { dec_nr_running(p, rq); dequeue_task(p, p->array); @@ -1009,23 +1008,23 @@ unsigned long weighted_cpuload(const int cpu) } #ifdef CONFIG_SMP -typedef struct { +struct migration_req { struct list_head list; struct task_struct *task; int dest_cpu; struct completion done; -} migration_req_t; +}; /* * The task's runqueue lock must be held. * Returns true if you have to wait for migration thread. */ static int -migrate_task(struct task_struct *p, int dest_cpu, migration_req_t *req) +migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req) { - runqueue_t *rq = task_rq(p); + struct rq *rq = task_rq(p); /* * If the task is not on a runqueue (and not running), then @@ -1056,7 +1055,7 @@ migrate_task(struct task_struct *p, int dest_cpu, migration_req_t *req) void wait_task_inactive(struct task_struct *p) { unsigned long flags; - runqueue_t *rq; + struct rq *rq; int preempted; repeat: @@ -1107,7 +1106,7 @@ void kick_process(struct task_struct *p) */ static inline unsigned long source_load(int cpu, int type) { - runqueue_t *rq = cpu_rq(cpu); + struct rq *rq = cpu_rq(cpu); if (type == 0) return rq->raw_weighted_load; @@ -1121,7 +1120,7 @@ static inline unsigned long source_load(int cpu, int type) */ static inline unsigned long target_load(int cpu, int type) { - runqueue_t *rq = cpu_rq(cpu); + struct rq *rq = cpu_rq(cpu); if (type == 0) return rq->raw_weighted_load; @@ -1134,7 +1133,7 @@ static inline unsigned long target_load(int cpu, int type) */ static inline unsigned long cpu_avg_load_per_task(int cpu) { - runqueue_t *rq = cpu_rq(cpu); + struct rq *rq = cpu_rq(cpu); unsigned long n = rq->nr_running; return n ? 
rq->raw_weighted_load / n : SCHED_LOAD_SCALE; @@ -1338,10 +1337,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) int cpu, this_cpu, success = 0; unsigned long flags; long old_state; - runqueue_t *rq; + struct rq *rq; #ifdef CONFIG_SMP - unsigned long load, this_load; struct sched_domain *sd, *this_sd = NULL; + unsigned long load, this_load; int new_cpu; #endif @@ -1577,9 +1576,9 @@ void fastcall sched_fork(struct task_struct *p, int clone_flags) */ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) { + struct rq *rq, *this_rq; unsigned long flags; int this_cpu, cpu; - runqueue_t *rq, *this_rq; rq = task_rq_lock(p, &flags); BUG_ON(p->state != TASK_RUNNING); @@ -1662,7 +1661,7 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) void fastcall sched_exit(struct task_struct *p) { unsigned long flags; - runqueue_t *rq; + struct rq *rq; /* * If the child was a (relative-) CPU hog then decrease @@ -1693,7 +1692,7 @@ void fastcall sched_exit(struct task_struct *p) * prepare_task_switch sets up locking and calls architecture specific * hooks. */ -static inline void prepare_task_switch(runqueue_t *rq, struct task_struct *next) +static inline void prepare_task_switch(struct rq *rq, struct task_struct *next) { prepare_lock_switch(rq, next); prepare_arch_switch(next); @@ -1714,7 +1713,7 @@ static inline void prepare_task_switch(runqueue_t *rq, struct task_struct *next) * with the lock held can cause deadlocks; see schedule() for * details.) */ -static inline void finish_task_switch(runqueue_t *rq, struct task_struct *prev) +static inline void finish_task_switch(struct rq *rq, struct task_struct *prev) __releases(rq->lock) { struct mm_struct *mm = rq->prev_mm; @@ -1755,7 +1754,8 @@ static inline void finish_task_switch(runqueue_t *rq, struct task_struct *prev) asmlinkage void schedule_tail(struct task_struct *prev) __releases(rq->lock) { - runqueue_t *rq = this_rq(); + struct rq *rq = this_rq(); + finish_task_switch(rq, prev); #ifdef __ARCH_WANT_UNLOCKED_CTXSW /* In this case, finish_task_switch does not reenable preemption */ @@ -1770,7 +1770,7 @@ asmlinkage void schedule_tail(struct task_struct *prev) * thread's register state. */ static inline struct task_struct * -context_switch(runqueue_t *rq, struct task_struct *prev, +context_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) { struct mm_struct *mm = next->mm; @@ -1883,7 +1883,7 @@ task_hot(struct task_struct *p, unsigned long long now, struct sched_domain *sd) * Note this does not disable interrupts like task_rq_lock, * you need to do so manually before calling. */ -static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2) +static void double_rq_lock(struct rq *rq1, struct rq *rq2) __acquires(rq1->lock) __acquires(rq2->lock) { @@ -1907,7 +1907,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2) * Note this does not restore interrupts like task_rq_unlock, * you need to do so manually after calling. */ -static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2) +static void double_rq_unlock(struct rq *rq1, struct rq *rq2) __releases(rq1->lock) __releases(rq2->lock) { @@ -1921,7 +1921,7 @@ static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2) /* * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 
*/ -static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest) +static void double_lock_balance(struct rq *this_rq, struct rq *busiest) __releases(this_rq->lock) __acquires(busiest->lock) __acquires(this_rq->lock) @@ -1944,9 +1944,9 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest) */ static void sched_migrate_task(struct task_struct *p, int dest_cpu) { - migration_req_t req; - runqueue_t *rq; + struct migration_req req; unsigned long flags; + struct rq *rq; rq = task_rq_lock(p, &flags); if (!cpu_isset(dest_cpu, p->cpus_allowed) @@ -1987,9 +1987,9 @@ void sched_exec(void) * pull_task - move a task from a remote runqueue to the local runqueue. * Both runqueues must be locked. */ -static void pull_task(runqueue_t *src_rq, prio_array_t *src_array, - struct task_struct *p, runqueue_t *this_rq, - prio_array_t *this_array, int this_cpu) +static void pull_task(struct rq *src_rq, struct prio_array *src_array, + struct task_struct *p, struct rq *this_rq, + struct prio_array *this_array, int this_cpu) { dequeue_task(p, src_array); dec_nr_running(p, src_rq); @@ -2010,7 +2010,7 @@ static void pull_task(runqueue_t *src_rq, prio_array_t *src_array, * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? */ static -int can_migrate_task(struct task_struct *p, runqueue_t *rq, int this_cpu, +int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, struct sched_domain *sd, enum idle_type idle, int *all_pinned) { @@ -2050,14 +2050,14 @@ int can_migrate_task(struct task_struct *p, runqueue_t *rq, int this_cpu, * * Called with both runqueues locked. */ -static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest, +static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, unsigned long max_nr_move, unsigned long max_load_move, struct sched_domain *sd, enum idle_type idle, int *all_pinned) { int idx, pulled = 0, pinned = 0, this_best_prio, best_prio, best_prio_seen, skip_for_load; - prio_array_t *array, *dst_array; + struct prio_array *array, *dst_array; struct list_head *head, *curr; struct task_struct *tmp; long rem_load_move; @@ -2212,7 +2212,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, sum_weighted_load = sum_nr_running = avg_load = 0; for_each_cpu_mask(i, group->cpumask) { - runqueue_t *rq = cpu_rq(i); + struct rq *rq = cpu_rq(i); if (*sd_idle && !idle_cpu(i)) *sd_idle = 0; @@ -2428,11 +2428,11 @@ ret: /* * find_busiest_queue - find the busiest runqueue among the cpus in group. */ -static runqueue_t * +static struct rq * find_busiest_queue(struct sched_group *group, enum idle_type idle, unsigned long imbalance) { - runqueue_t *busiest = NULL, *rq; + struct rq *busiest = NULL, *rq; unsigned long max_load = 0; int i; @@ -2468,13 +2468,13 @@ static inline unsigned long minus_1_or_zero(unsigned long n) * * Called with this_rq unlocked. */ -static int load_balance(int this_cpu, runqueue_t *this_rq, +static int load_balance(int this_cpu, struct rq *this_rq, struct sched_domain *sd, enum idle_type idle) { int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; struct sched_group *group; unsigned long imbalance; - runqueue_t *busiest; + struct rq *busiest; if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings) @@ -2596,10 +2596,10 @@ out_one_pinned: * this_rq is locked. 
*/ static int -load_balance_newidle(int this_cpu, runqueue_t *this_rq, struct sched_domain *sd) +load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) { struct sched_group *group; - runqueue_t *busiest = NULL; + struct rq *busiest = NULL; unsigned long imbalance; int nr_moved = 0; int sd_idle = 0; @@ -2657,7 +2657,7 @@ out_balanced: * idle_balance is called by schedule() if this_cpu is about to become * idle. Attempts to pull tasks from other CPUs. */ -static void idle_balance(int this_cpu, runqueue_t *this_rq) +static void idle_balance(int this_cpu, struct rq *this_rq) { struct sched_domain *sd; @@ -2678,11 +2678,11 @@ static void idle_balance(int this_cpu, runqueue_t *this_rq) * * Called with busiest_rq locked. */ -static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu) +static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) { - struct sched_domain *sd; - runqueue_t *target_rq; int target_cpu = busiest_rq->push_cpu; + struct sched_domain *sd; + struct rq *target_rq; /* Is there any task to move? */ if (busiest_rq->nr_running <= 1) @@ -2736,7 +2736,7 @@ static inline unsigned long cpu_offset(int cpu) } static void -rebalance_tick(int this_cpu, runqueue_t *this_rq, enum idle_type idle) +rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle) { unsigned long this_load, interval, j = cpu_offset(this_cpu); struct sched_domain *sd; @@ -2790,15 +2790,15 @@ rebalance_tick(int this_cpu, runqueue_t *this_rq, enum idle_type idle) /* * on UP we do not need to balance between CPUs: */ -static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle) +static inline void rebalance_tick(int cpu, struct rq *rq, enum idle_type idle) { } -static inline void idle_balance(int cpu, runqueue_t *rq) +static inline void idle_balance(int cpu, struct rq *rq) { } #endif -static inline int wake_priority_sleeper(runqueue_t *rq) +static inline int wake_priority_sleeper(struct rq *rq) { int ret = 0; @@ -2826,7 +2826,7 @@ EXPORT_PER_CPU_SYMBOL(kstat); * Bank in p->sched_time the ns elapsed since the last tick or switch. */ static inline void -update_cpu_clock(struct task_struct *p, runqueue_t *rq, unsigned long long now) +update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now) { p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick); } @@ -2858,7 +2858,7 @@ unsigned long long current_sched_time(const struct task_struct *p) * increasing number of running tasks. 
We also ignore the interactivity * if a better static_prio task has expired: */ -static inline int expired_starving(runqueue_t *rq) +static inline int expired_starving(struct rq *rq) { if (rq->curr->static_prio > rq->best_expired_prio) return 1; @@ -2900,7 +2900,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset, cputime_t cputime) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; - runqueue_t *rq = this_rq(); + struct rq *rq = this_rq(); cputime64_t tmp; p->stime = cputime_add(p->stime, cputime); @@ -2930,7 +2930,7 @@ void account_steal_time(struct task_struct *p, cputime_t steal) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; cputime64_t tmp = cputime_to_cputime64(steal); - runqueue_t *rq = this_rq(); + struct rq *rq = this_rq(); if (p == rq->idle) { p->stime = cputime_add(p->stime, steal); @@ -2954,7 +2954,7 @@ void scheduler_tick(void) unsigned long long now = sched_clock(); struct task_struct *p = current; int cpu = smp_processor_id(); - runqueue_t *rq = this_rq(); + struct rq *rq = cpu_rq(cpu); update_cpu_clock(p, rq, now); @@ -3043,7 +3043,7 @@ out: } #ifdef CONFIG_SCHED_SMT -static inline void wakeup_busy_runqueue(runqueue_t *rq) +static inline void wakeup_busy_runqueue(struct rq *rq) { /* If an SMT runqueue is sleeping due to priority reasons wake it up */ if (rq->curr == rq->idle && rq->nr_running) @@ -3069,7 +3069,7 @@ static void wake_sleeping_dependent(int this_cpu) return; for_each_cpu_mask(i, sd->span) { - runqueue_t *smt_rq = cpu_rq(i); + struct rq *smt_rq = cpu_rq(i); if (i == this_cpu) continue; @@ -3099,7 +3099,7 @@ smt_slice(struct task_struct *p, struct sched_domain *sd) * need to be obeyed. */ static int -dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p) +dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p) { struct sched_domain *tmp, *sd = NULL; int ret = 0, i; @@ -3120,7 +3120,7 @@ dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p) for_each_cpu_mask(i, sd->span) { struct task_struct *smt_curr; - runqueue_t *smt_rq; + struct rq *smt_rq; if (i == this_cpu) continue; @@ -3166,7 +3166,7 @@ static inline void wake_sleeping_dependent(int this_cpu) { } static inline int -dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p) +dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p) { return 0; } @@ -3221,13 +3221,13 @@ static inline int interactive_sleep(enum sleep_type sleep_type) asmlinkage void __sched schedule(void) { struct task_struct *prev, *next; + struct prio_array *array; struct list_head *queue; unsigned long long now; unsigned long run_time; int cpu, idx, new_prio; - prio_array_t *array; long *switch_count; - runqueue_t *rq; + struct rq *rq; /* * Test if we are atomic. 
Since do_exit() needs to call into @@ -3787,9 +3787,9 @@ EXPORT_SYMBOL(sleep_on_timeout); */ void rt_mutex_setprio(struct task_struct *p, int prio) { + struct prio_array *array; unsigned long flags; - prio_array_t *array; - runqueue_t *rq; + struct rq *rq; int oldprio; BUG_ON(prio < 0 || prio > MAX_PRIO); @@ -3828,10 +3828,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio) void set_user_nice(struct task_struct *p, long nice) { + struct prio_array *array; int old_prio, delta; unsigned long flags; - prio_array_t *array; - runqueue_t *rq; + struct rq *rq; if (TASK_NICE(p) == nice || nice < -20 || nice > 19) return; @@ -4012,9 +4012,9 @@ int sched_setscheduler(struct task_struct *p, int policy, struct sched_param *param) { int retval, oldprio, oldpolicy = -1; - prio_array_t *array; + struct prio_array *array; unsigned long flags; - runqueue_t *rq; + struct rq *rq; /* may grab non-irq protected spin_locks */ BUG_ON(in_interrupt()); @@ -4376,9 +4376,8 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, */ asmlinkage long sys_sched_yield(void) { - runqueue_t *rq = this_rq_lock(); - prio_array_t *array = current->array; - prio_array_t *target = rq->expired; + struct rq *rq = this_rq_lock(); + struct prio_array *array = current->array, *target = rq->expired; schedstat_inc(rq, yld_cnt); /* @@ -4525,7 +4524,7 @@ EXPORT_SYMBOL(yield); */ void __sched io_schedule(void) { - struct runqueue *rq = &__raw_get_cpu_var(runqueues); + struct rq *rq = &__raw_get_cpu_var(runqueues); atomic_inc(&rq->nr_iowait); schedule(); @@ -4535,7 +4534,7 @@ EXPORT_SYMBOL(io_schedule); long __sched io_schedule_timeout(long timeout) { - struct runqueue *rq = &__raw_get_cpu_var(runqueues); + struct rq *rq = &__raw_get_cpu_var(runqueues); long ret; atomic_inc(&rq->nr_iowait); @@ -4743,7 +4742,7 @@ void show_state(void) */ void __devinit init_idle(struct task_struct *idle, int cpu) { - runqueue_t *rq = cpu_rq(cpu); + struct rq *rq = cpu_rq(cpu); unsigned long flags; idle->timestamp = sched_clock(); @@ -4782,7 +4781,7 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE; /* * This is how migration works: * - * 1) we queue a migration_req_t structure in the source CPU's + * 1) we queue a struct migration_req structure in the source CPU's * runqueue and wake up that CPU's migration thread. * 2) we down() the locked semaphore => thread blocks. 
* 3) migration thread wakes up (implicitly it forces the migrated @@ -4806,9 +4805,9 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE; */ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) { + struct migration_req req; unsigned long flags; - migration_req_t req; - runqueue_t *rq; + struct rq *rq; int ret = 0; rq = task_rq_lock(p, &flags); @@ -4850,7 +4849,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed); */ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) { - runqueue_t *rq_dest, *rq_src; + struct rq *rq_dest, *rq_src; int ret = 0; if (unlikely(cpu_is_offline(dest_cpu))) @@ -4896,15 +4895,15 @@ out: static int migration_thread(void *data) { int cpu = (long)data; - runqueue_t *rq; + struct rq *rq; rq = cpu_rq(cpu); BUG_ON(rq->migration_thread != current); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { + struct migration_req *req; struct list_head *head; - migration_req_t *req; try_to_freeze(); @@ -4928,7 +4927,7 @@ static int migration_thread(void *data) set_current_state(TASK_INTERRUPTIBLE); continue; } - req = list_entry(head->next, migration_req_t, list); + req = list_entry(head->next, struct migration_req, list); list_del_init(head->next); spin_unlock(&rq->lock); @@ -4955,10 +4954,10 @@ wait_to_die: /* Figure out where task on dead CPU should go, use force if neccessary. */ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) { - runqueue_t *rq; unsigned long flags; - int dest_cpu; cpumask_t mask; + struct rq *rq; + int dest_cpu; restart: /* On same node? */ @@ -4998,9 +4997,9 @@ restart: * their home CPUs. So we just add the counter to another CPU's counter, * to keep the global sum constant after CPU-down: */ -static void migrate_nr_uninterruptible(runqueue_t *rq_src) +static void migrate_nr_uninterruptible(struct rq *rq_src) { - runqueue_t *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL)); + struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL)); unsigned long flags; local_irq_save(flags); @@ -5036,7 +5035,7 @@ static void migrate_live_tasks(int src_cpu) void sched_idle_next(void) { int this_cpu = smp_processor_id(); - runqueue_t *rq = cpu_rq(this_cpu); + struct rq *rq = cpu_rq(this_cpu); struct task_struct *p = rq->idle; unsigned long flags; @@ -5074,7 +5073,7 @@ void idle_task_exit(void) static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) { - struct runqueue *rq = cpu_rq(dead_cpu); + struct rq *rq = cpu_rq(dead_cpu); /* Must be exiting, otherwise would be on tasklist. */ BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD); @@ -5099,7 +5098,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) /* release_task() removes task from tasklist, so we won't find dead tasks. */ static void migrate_dead_tasks(unsigned int dead_cpu) { - struct runqueue *rq = cpu_rq(dead_cpu); + struct rq *rq = cpu_rq(dead_cpu); unsigned int arr, i; for (arr = 0; arr < 2; arr++) { @@ -5123,8 +5122,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) { struct task_struct *p; int cpu = (long)hcpu; - struct runqueue *rq; unsigned long flags; + struct rq *rq; switch (action) { case CPU_UP_PREPARE: @@ -5176,9 +5175,10 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) * the requestors. 
*/ spin_lock_irq(&rq->lock); while (!list_empty(&rq->migration_queue)) { - migration_req_t *req; + struct migration_req *req; + req = list_entry(rq->migration_queue.next, - migration_req_t, list); + struct migration_req, list); list_del_init(&req->list); complete(&req->done); } @@ -5361,7 +5361,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) */ static void cpu_attach_domain(struct sched_domain *sd, int cpu) { - runqueue_t *rq = cpu_rq(cpu); + struct rq *rq = cpu_rq(cpu); struct sched_domain *tmp; /* Remove the sched domains which do not contribute to scheduling. */ @@ -6690,8 +6690,8 @@ void __init sched_init(void) int i, j, k; for_each_possible_cpu(i) { - prio_array_t *array; - runqueue_t *rq; + struct prio_array *array; + struct rq *rq; rq = cpu_rq(i); spin_lock_init(&rq->lock); @@ -6764,10 +6764,10 @@ EXPORT_SYMBOL(__might_sleep); #ifdef CONFIG_MAGIC_SYSRQ void normalize_rt_tasks(void) { + struct prio_array *array; struct task_struct *p; - prio_array_t *array; unsigned long flags; - runqueue_t *rq; + struct rq *rq; read_lock_irq(&tasklist_lock); for_each_process(p) { -- cgit v1.2.3 From fe4ada2d6f0b746246e9b5bf0f4f2e4d3a07d26e Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 3 Jul 2006 19:44:51 -0700 Subject: [IOAT]: fix header file kernel-doc Fix kernel-doc problems in include/linux/dmaengine.h: - add some fields/parameters - expand some descriptions - fix typos Signed-off-by: Randy Dunlap Signed-off-by: David S. Miller --- include/linux/dmaengine.h | 43 +++++++++++++++++++++++++------------------ 1 file changed, 25 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 272010a6078a..c94d8f1d62e5 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -44,7 +44,7 @@ enum dma_event { }; /** - * typedef dma_cookie_t + * typedef dma_cookie_t - an opaque DMA cookie * * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code */ @@ -80,14 +80,14 @@ struct dma_chan_percpu { /** * struct dma_chan - devices supply DMA channels, clients use them - * @client: ptr to the client user of this chan, will be NULL when unused - * @device: ptr to the dma device who supplies this channel, always !NULL + * @client: ptr to the client user of this chan, will be %NULL when unused + * @device: ptr to the dma device who supplies this channel, always !%NULL * @cookie: last cookie value returned to client - * @chan_id: - * @class_dev: + * @chan_id: channel ID for sysfs + * @class_dev: class device for sysfs * @refcount: kref, used in "bigref" slow-mode - * @slow_ref: - * @rcu: + * @slow_ref: indicates that the DMA channel is free + * @rcu: the DMA channel's RCU head * @client_node: used to add this to the client chan list * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu @@ -162,10 +162,17 @@ struct dma_client { * @chancnt: how many DMA channels are supported * @channels: the list of struct dma_chan * @global_node: list_head for global dma_device_list - * @refcount: - * @done: - * @dev_id: - * Other func ptrs: used to make use of this device's capabilities + * @refcount: reference count + * @done: IO completion struct + * @dev_id: unique device ID + * @device_alloc_chan_resources: allocate resources and return the + * number of allocated descriptors + * @device_free_chan_resources: release DMA channel's resources + * @device_memcpy_buf_to_buf: memcpy buf pointer to buf pointer + * @device_memcpy_buf_to_pg: 
memcpy buf pointer to struct page + * @device_memcpy_pg_to_pg: memcpy struct page/offset to struct page/offset + * @device_memcpy_complete: poll the status of an IOAT DMA transaction + * @device_memcpy_issue_pending: push appended descriptors to hardware */ struct dma_device { @@ -211,7 +218,7 @@ void dma_async_client_chan_request(struct dma_client *client, * Both @dest and @src must be mappable to a bus address according to the * DMA mapping API rules for streaming mappings. * Both @dest and @src must stay memory resident (kernel memory or locked - * user space pages) + * user space pages). */ static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, void *src, size_t len) @@ -225,7 +232,7 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, } /** - * dma_async_memcpy_buf_to_pg - offloaded copy + * dma_async_memcpy_buf_to_pg - offloaded copy from address to page * @chan: DMA channel to offload copy to * @page: destination page * @offset: offset in page to copy to @@ -250,18 +257,18 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, } /** - * dma_async_memcpy_buf_to_pg - offloaded copy + * dma_async_memcpy_pg_to_pg - offloaded copy from page to page * @chan: DMA channel to offload copy to - * @dest_page: destination page + * @dest_pg: destination page * @dest_off: offset in page to copy to - * @src_page: source page + * @src_pg: source page * @src_off: offset in page to copy from * @len: length * * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus * address according to the DMA mapping API rules for streaming mappings. * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident - * (kernel memory or locked user space pages) + * (kernel memory or locked user space pages). */ static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, unsigned int dest_off, struct page *src_pg, @@ -278,7 +285,7 @@ static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, /** * dma_async_memcpy_issue_pending - flush pending copies to HW - * @chan: + * @chan: target DMA channel * * This allows drivers to push copies to HW in batches, * reducing MMIO writes where possible. -- cgit v1.2.3 From 4bdbf6c033ba05bff65f69989baccd7103c5a530 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 3 Jul 2006 19:47:27 -0700 Subject: [NET]: add+use poison defines Add and use poison defines in net/. Signed-off-by: Randy Dunlap Signed-off-by: David S. 
From 3c6b377321678c649f9b3c66da0149975c614102 Mon Sep 17 00:00:00 2001
From: Randy Dunlap
Date: Mon, 3 Jul 2006 19:48:25 -0700
Subject: [ATM]: add+use poison defines

ATM: add and use POISON define values.

Signed-off-by: Randy Dunlap
Signed-off-by: David S. Miller
---
 drivers/atm/ambassador.c | 3 ++-
 drivers/atm/idt77252.c   | 3 ++-
 include/linux/poison.h   | 1 +
 3 files changed, 5 insertions(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index d3b426313a41..4521a249dd56 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -31,6 +31,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -1995,7 +1996,7 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
       }
       i += 1;
     }
-    if (*pointer == 0xdeadbeef) {
+    if (*pointer == ATM_POISON) {
       return loader_start (lb, dev, ucode_start);
     } else {
       // cast needed as there is no %? for pointer differnces
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 5d1c6c95262c..b0369bb20f08 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -35,6 +35,7 @@ static char const rcsid[] =
 #include
 #include
+#include
 #include
 #include
 #include
 
@@ -3657,7 +3658,7 @@ probe_sram(struct idt77252_dev *card)
 	writel(SAR_CMD_WRITE_SRAM | (0 << 2), SAR_REG_CMD);
 
 	for (addr = 0x4000; addr < 0x80000; addr += 0x4000) {
-		writel(0xdeadbeef, SAR_REG_DR0);
+		writel(ATM_POISON, SAR_REG_DR0);
 		writel(SAR_CMD_WRITE_SRAM | (addr << 2), SAR_REG_CMD);
 
 		writel(SAR_CMD_READ_SRAM | (0 << 2), SAR_REG_CMD);
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 05d7d0fd6bdb..3e628f990fdf 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -44,6 +44,7 @@
 
 /********** drivers/atm/ **********/
 #define ATM_POISON_FREE 0x12
+#define ATM_POISON 0xdeadbeef
 
 /********** net/ **********/
 #define NEIGHBOR_DEAD 0xdeadbeef
-- cgit v1.2.3
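[Illustrative aside, not part of the patches: in the idt77252 hunk the constant is not marking freed memory at all; probe_sram() writes the recognizable 0xdeadbeef pattern at ever higher addresses and watches address 0, so the address at which the write aliases back reveals how much SRAM is actually fitted. The self-contained sketch below reproduces that aliasing probe against a fake in-memory SRAM; the array size and helper names are invented, only the ATM_POISON value and the probe loop bounds mirror the patch.]

#include <stdio.h>
#include <stdint.h>

#define ATM_POISON 0xdeadbeefU		/* same value the patch defines */

/* Fake SRAM: pretend only 0x10000 words are fitted, so addresses wrap there. */
#define FITTED_WORDS 0x10000
static uint32_t fake_sram[FITTED_WORDS];

static void sram_write(uint32_t addr, uint32_t val)
{
	fake_sram[addr % FITTED_WORDS] = val;	/* out-of-range addresses alias */
}

static uint32_t sram_read(uint32_t addr)
{
	return fake_sram[addr % FITTED_WORDS];
}

int main(void)
{
	uint32_t addr, size = 0;

	sram_write(0, 0);			/* known marker at address 0 */
	for (addr = 0x4000; addr < 0x80000; addr += 0x4000) {
		sram_write(addr, ATM_POISON);	/* same pattern probe_sram() writes */
		if (sram_read(0) == ATM_POISON) {
			size = addr;		/* write aliased back: found the top */
			break;
		}
	}
	printf("detected SRAM size: 0x%x words\n", (unsigned)size);
	return 0;
}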
From 7ad7153b051d9628ecd6a336b543ea6ef099bd2c Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Tue, 4 Jul 2006 14:00:06 -0700
Subject: Fix up headers_install wrt devfs removal

No devfs_fs.h header any more..

Signed-off-by: Linus Torvalds
---
 include/linux/Kbuild | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index f7252be57702..2b8a7d68fae3 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -8,7 +8,7 @@ header-y += affs_fs.h affs_hardblocks.h aio_abi.h a.out.h arcfb.h \
 	atmppp.h atmsap.h atmsvc.h atm_zatm.h auto_fs4.h auxvec.h \
 	awe_voice.h ax25.h b1lli.h baycom.h bfs_fs.h blkpg.h \
 	bpqether.h cdk.h chio.h coda_psdev.h coff.h comstats.h \
-	consolemap.h cycx_cfm.h devfs_fs.h dm-ioctl.h dn.h dqblk_v1.h \
+	consolemap.h cycx_cfm.h dm-ioctl.h dn.h dqblk_v1.h \
 	dqblk_v2.h dqblk_xfs.h efs_fs_sb.h elf-fdpic.h elf.h elf-em.h \
 	fadvise.h fd.h fdreg.h ftape-header-segment.h ftape-vendors.h \
 	fuse.h futex.h genetlink.h gen_stats.h gigaset_dev.h hdsmart.h \
-- cgit v1.2.3